author     jbj <devnull@localhost>  2001-10-15 03:47:21 +0000
committer  jbj <devnull@localhost>  2001-10-15 03:47:21 +0000
commit     db7110722d3317cc81dd11c104dd45ac8c328e20 (patch)
tree       acc2fe412f5d1f7e3d3fd35993fd3fd5cc20a7f3
parent     033e2a186a797374caeb2295a00dee5eef202ffc (diff)
Initial revision
CVS patchset: 5110
CVS date: 2001/10/15 03:47:21
-rw-r--r--  db/build_vxworks/BerkeleyDB/Makefile.component  2060
-rw-r--r--  db/build_vxworks/BerkeleyDB/Makefile.custom  51
-rw-r--r--  db/build_vxworks/BerkeleyDB/component.cdf  1099
-rw-r--r--  db/build_vxworks/BerkeleyDB/component.wpj  6139
-rw-r--r--  db/build_vxworks/db_archive/db_archive.c  181
-rw-r--r--  db/build_vxworks/db_archive/db_archive.wpj  161
-rw-r--r--  db/build_vxworks/db_archive/db_archive/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_archive/db_archive/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_archive/db_archive/component.cdf  30
-rw-r--r--  db/build_vxworks/db_archive/db_archive/component.wpj  615
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint.c  253
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint.wpj  161
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint/component.cdf  30
-rw-r--r--  db/build_vxworks/db_checkpoint/db_checkpoint/component.wpj  615
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock.c  249
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock.wpj  161
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock/component.cdf  30
-rw-r--r--  db/build_vxworks/db_deadlock/db_deadlock/component.wpj  615
-rw-r--r--  db/build_vxworks/db_dump/db_dump.c  601
-rw-r--r--  db/build_vxworks/db_dump/db_dump.wpj  161
-rw-r--r--  db/build_vxworks/db_dump/db_dump/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_dump/db_dump/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_dump/db_dump/component.cdf  30
-rw-r--r--  db/build_vxworks/db_dump/db_dump/component.wpj  615
-rw-r--r--  db/build_vxworks/db_load/db_load.c  1132
-rw-r--r--  db/build_vxworks/db_load/db_load.wpj  161
-rw-r--r--  db/build_vxworks/db_load/db_load/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_load/db_load/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_load/db_load/component.cdf  30
-rw-r--r--  db/build_vxworks/db_load/db_load/component.wpj  615
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog.c  238
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog.wpj  161
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog/component.cdf  30
-rw-r--r--  db/build_vxworks/db_printlog/db_printlog/component.wpj  615
-rw-r--r--  db/build_vxworks/db_recover/db_recover.c  315
-rw-r--r--  db/build_vxworks/db_recover/db_recover.wpj  161
-rw-r--r--  db/build_vxworks/db_recover/db_recover/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_recover/db_recover/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_recover/db_recover/component.cdf  30
-rw-r--r--  db/build_vxworks/db_recover/db_recover/component.wpj  615
-rw-r--r--  db/build_vxworks/db_stat/db_stat.c  1085
-rw-r--r--  db/build_vxworks/db_stat/db_stat.wpj  161
-rw-r--r--  db/build_vxworks/db_stat/db_stat/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_stat/db_stat/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_stat/db_stat/component.cdf  30
-rw-r--r--  db/build_vxworks/db_stat/db_stat/component.wpj  615
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade.c  190
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade.wpj  161
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade/component.cdf  30
-rw-r--r--  db/build_vxworks/db_upgrade/db_upgrade/component.wpj  615
-rw-r--r--  db/build_vxworks/db_verify/db_verify.c  204
-rw-r--r--  db/build_vxworks/db_verify/db_verify.wpj  161
-rw-r--r--  db/build_vxworks/db_verify/db_verify/Makefile.component  433
-rw-r--r--  db/build_vxworks/db_verify/db_verify/Makefile.custom  51
-rw-r--r--  db/build_vxworks/db_verify/db_verify/component.cdf  30
-rw-r--r--  db/build_vxworks/db_verify/db_verify/component.wpj  615
-rwxr-xr-x  db/build_vxworks/demo/DBdemo.wpj  162
-rw-r--r--  db/build_vxworks/demo/README  30
-rw-r--r--  db/build_vxworks/demo/dbdemo.c  130
-rw-r--r--  db/build_vxworks/demo/demo/Makefile.component  433
-rw-r--r--  db/build_vxworks/demo/demo/Makefile.custom  51
-rw-r--r--  db/build_vxworks/demo/demo/component.cdf  30
-rw-r--r--  db/build_vxworks/demo/demo/component.wpj  607
-rw-r--r--  db/common/util_arg.c  127
-rw-r--r--  db/cxx/cxx_db.cpp  822
-rw-r--r--  db/cxx/cxx_dbc.cpp  157
-rw-r--r--  db/cxx/cxx_dbt.cpp  60
-rw-r--r--  db/cxx/cxx_env.cpp  1045
-rw-r--r--  db/cxx/cxx_logc.cpp  67
-rw-r--r--  db/dist/aclocal_java/ac_check_class.ac  107
-rw-r--r--  db/dist/aclocal_java/ac_check_classpath.ac  23
-rw-r--r--  db/dist/aclocal_java/ac_check_junit.ac  54
-rw-r--r--  db/dist/aclocal_java/ac_check_rqrd_class.ac  26
-rw-r--r--  db/dist/aclocal_java/ac_java_options.ac  32
-rw-r--r--  db/dist/aclocal_java/ac_jni_include_dirs.ac  97
-rw-r--r--  db/dist/aclocal_java/ac_prog_jar.ac  36
-rw-r--r--  db/dist/aclocal_java/ac_prog_java.ac  77
-rw-r--r--  db/dist/aclocal_java/ac_prog_java_works.ac  97
-rw-r--r--  db/dist/aclocal_java/ac_prog_javac.ac  43
-rw-r--r--  db/dist/aclocal_java/ac_prog_javac_works.ac  35
-rw-r--r--  db/dist/aclocal_java/ac_prog_javadoc.ac  37
-rw-r--r--  db/dist/aclocal_java/ac_prog_javah.ac  26
-rw-r--r--  db/dist/aclocal_java/ac_try_compile_java.ac  39
-rw-r--r--  db/dist/aclocal_java/ac_try_run_javac.ac  40
-rw-r--r--  db/dist/buildrel  84
-rwxr-xr-x  db/dist/s_test  85
-rw-r--r--  db/dist/vx_ae.in/Makefile.component  433
-rw-r--r--  db/dist/vx_ae.in/Makefile.custom  51
-rw-r--r--  db/dist/vx_ae.in/component.cdf  30
-rw-r--r--  db/dist/vx_ae.in/component.wpj  615
-rwxr-xr-x  db/dist/vx_buildcd  119
-rw-r--r--  db/dist/vx_setup/CONFIG.in  10
-rw-r--r--  db/dist/vx_setup/LICENSE.TXT  3
-rw-r--r--  db/dist/vx_setup/MESSAGES.TCL  651
-rw-r--r--  db/dist/vx_setup/README.in  7
-rw-r--r--  db/dist/vx_setup/SETUP.BMP  bin 0 -> 187962 bytes
-rw-r--r--  db/dist/vx_setup/vx_allfile.in  5
-rw-r--r--  db/dist/vx_setup/vx_demofile.in  3
-rw-r--r--  db/dist/vx_setup/vx_setup.in  13
-rw-r--r--  db/dist/win_exports.in  126
-rw-r--r--  db/dist/wpj.in  161
-rw-r--r--  db/docs/api_c/env_set_timeout.html  106
-rw-r--r--  db/docs/api_c/lock_id_free.html  60
-rw-r--r--  db/docs/api_c/log_cursor.html  95
-rw-r--r--  db/docs/api_c/logc_close.html  66
-rw-r--r--  db/docs/api_c/logc_get.html  109
-rw-r--r--  db/docs/api_c/memp_fcreate.html  65
-rw-r--r--  db/docs/api_c/memp_set_clear_len.html  65
-rw-r--r--  db/docs/api_c/memp_set_fileid.html  86
-rw-r--r--  db/docs/api_c/memp_set_ftype.html  66
-rw-r--r--  db/docs/api_c/memp_set_lsn_offset.html  64
-rw-r--r--  db/docs/api_c/memp_set_pgcookie.html  65
-rw-r--r--  db/docs/api_c/txn_set_timeout.html  80
-rw-r--r--  db/docs/api_cxx/c_index.html  166
-rw-r--r--  db/docs/api_cxx/env_set_timeout.html  110
-rw-r--r--  db/docs/api_cxx/lock_id_free.html  64
-rw-r--r--  db/docs/api_cxx/log_cursor.html  98
-rw-r--r--  db/docs/api_cxx/logc_class.html  57
-rw-r--r--  db/docs/api_cxx/logc_close.html  70
-rw-r--r--  db/docs/api_cxx/logc_get.html  113
-rw-r--r--  db/docs/api_cxx/memp_fcreate.html  69
-rw-r--r--  db/docs/api_cxx/memp_set_clear_len.html  69
-rw-r--r--  db/docs/api_cxx/memp_set_fileid.html  90
-rw-r--r--  db/docs/api_cxx/memp_set_ftype.html  70
-rw-r--r--  db/docs/api_cxx/memp_set_lsn_offset.html  68
-rw-r--r--  db/docs/api_cxx/memp_set_pgcookie.html  69
-rw-r--r--  db/docs/api_cxx/txn_set_timeout.html  84
-rw-r--r--  db/docs/api_java/c_index.html  144
-rw-r--r--  db/docs/api_java/env_set_timeout.html  104
-rw-r--r--  db/docs/api_java/lock_id_free.html  62
-rw-r--r--  db/docs/api_java/log_cursor.html  88
-rw-r--r--  db/docs/api_java/logc_class.html  57
-rw-r--r--  db/docs/api_java/logc_close.html  69
-rw-r--r--  db/docs/api_java/logc_get.html  112
-rw-r--r--  db/docs/api_java/txn_set_timeout.html  82
-rw-r--r--  db/docs/ref/build_unix/macosx.html  28
-rw-r--r--  db/docs/ref/build_vxworks/introae.html  137
-rw-r--r--  db/docs/ref/lock/timeout.html  58
-rw-r--r--  db/docs/ref/rep/app.html  73
-rw-r--r--  db/docs/ref/rep/comm.html  71
-rw-r--r--  db/docs/ref/rep/elect.html  71
-rw-r--r--  db/docs/ref/rep/faq.html  77
-rw-r--r--  db/docs/ref/rep/id.html  37
-rw-r--r--  db/docs/ref/rep/init.html  44
-rw-r--r--  db/docs/ref/rep/intro.html  57
-rw-r--r--  db/docs/ref/rep/logonly.html  22
-rw-r--r--  db/docs/ref/rep/newsite.html  43
-rw-r--r--  db/docs/ref/rep/pri.html  32
-rw-r--r--  db/docs/ref/rep/trans.html  101
-rw-r--r--  db/docs/ref/transapp/hotfail.html  82
-rw-r--r--  db/docs/ref/upgrade.4.0/deadlock.html  25
-rw-r--r--  db/docs/ref/upgrade.4.0/disk.html  25
-rw-r--r--  db/docs/ref/upgrade.4.0/env.html  81
-rw-r--r--  db/docs/ref/upgrade.4.0/intro.html  26
-rw-r--r--  db/docs/ref/upgrade.4.0/java.html  32
-rw-r--r--  db/docs/ref/upgrade.4.0/lock.html  45
-rw-r--r--  db/docs/ref/upgrade.4.0/lock_id_free.html  25
-rw-r--r--  db/docs/ref/upgrade.4.0/log.html  55
-rw-r--r--  db/docs/ref/upgrade.4.0/mp.html  65
-rw-r--r--  db/docs/ref/upgrade.4.0/rpc.html  26
-rw-r--r--  db/docs/ref/upgrade.4.0/set_lk_max.html  26
-rw-r--r--  db/docs/ref/upgrade.4.0/toc.html  35
-rw-r--r--  db/docs/ref/upgrade.4.0/txn.html  46
-rw-r--r--  db/examples_c/bench_001.c  341
-rw-r--r--  db/examples_c/ex_repquote/ex_repquote.h  67
-rw-r--r--  db/examples_c/ex_repquote/ex_rq_client.c  285
-rw-r--r--  db/examples_c/ex_repquote/ex_rq_main.c  310
-rw-r--r--  db/examples_c/ex_repquote/ex_rq_master.c  159
-rw-r--r--  db/examples_c/ex_repquote/ex_rq_net.c  724
-rw-r--r--  db/examples_c/ex_repquote/ex_rq_util.c  439
-rw-r--r--  db/include/rep.h  144
-rw-r--r--  db/include_auto/rep_ext.h  26
-rw-r--r--  db/include_auto/rep_ext.in  42
-rw-r--r--  db/java/src/com/sleepycat/db/DbLockNotGrantedException.java  57
-rw-r--r--  db/java/src/com/sleepycat/db/DbLockRequest.java  66
-rw-r--r--  db/java/src/com/sleepycat/db/DbLogc.java  39
-rw-r--r--  db/java/src/com/sleepycat/db/DbRepTransport.java  19
-rw-r--r--  db/libdb_java/com_sleepycat_db_DbLogc.h  37
-rw-r--r--  db/libdb_java/java_DbLogc.c  107
-rw-r--r--  db/os/os_clock.c  93
-rw-r--r--  db/os_vxworks/os_vx_abs.c  45
-rw-r--r--  db/os_vxworks/os_vx_finit.c  31
-rw-r--r--  db/os_vxworks/os_vx_map.c  442
-rw-r--r--  db/os_win32/os_clock.c  40
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB.pm  1231
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB.pod  1760
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB.pod.P  1527
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB.xs  4058
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB/Btree.pm  8
-rw-r--r--  db/perl/BerkeleyDB/BerkeleyDB/Hash.pm  8
-rw-r--r--  db/perl/BerkeleyDB/Changes  129
-rw-r--r--  db/perl/BerkeleyDB/MANIFEST  50
-rw-r--r--  db/perl/BerkeleyDB/Makefile.PL  123
-rw-r--r--  db/perl/BerkeleyDB/README  464
-rw-r--r--  db/perl/BerkeleyDB/Todo  57
-rw-r--r--  db/perl/BerkeleyDB/config.in  51
-rwxr-xr-x  db/perl/BerkeleyDB/dbinfo  109
-rw-r--r--  db/perl/BerkeleyDB/hints/irix_6_5.pl  1
-rw-r--r--  db/perl/BerkeleyDB/hints/solaris.pl  1
-rw-r--r--  db/perl/BerkeleyDB/mkconsts  211
-rwxr-xr-x  db/perl/BerkeleyDB/mkpod  146
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004  44
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004_01  217
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004_02  217
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004_03  223
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004_04  209
-rw-r--r--  db/perl/BerkeleyDB/patches/5.004_05  209
-rw-r--r--  db/perl/BerkeleyDB/patches/5.005  209
-rw-r--r--  db/perl/BerkeleyDB/patches/5.005_01  209
-rw-r--r--  db/perl/BerkeleyDB/patches/5.005_02  264
-rw-r--r--  db/perl/BerkeleyDB/patches/5.005_03  250
-rw-r--r--  db/perl/BerkeleyDB/patches/5.6.0  294
-rw-r--r--  db/perl/BerkeleyDB/t/btree.t  925
-rw-r--r--  db/perl/BerkeleyDB/t/db-3.0.t  89
-rw-r--r--  db/perl/BerkeleyDB/t/db-3.1.t  123
-rw-r--r--  db/perl/BerkeleyDB/t/db-3.2.t  42
-rw-r--r--  db/perl/BerkeleyDB/t/destroy.t  101
-rw-r--r--  db/perl/BerkeleyDB/t/env.t  233
-rw-r--r--  db/perl/BerkeleyDB/t/examples.t  412
-rw-r--r--  db/perl/BerkeleyDB/t/examples.t.T  496
-rw-r--r--  db/perl/BerkeleyDB/t/examples3.t  132
-rw-r--r--  db/perl/BerkeleyDB/t/examples3.t.T  217
-rw-r--r--  db/perl/BerkeleyDB/t/filter.t  217
-rw-r--r--  db/perl/BerkeleyDB/t/hash.t  725
-rw-r--r--  db/perl/BerkeleyDB/t/join.t  225
-rw-r--r--  db/perl/BerkeleyDB/t/mldbm.t  159
-rw-r--r--  db/perl/BerkeleyDB/t/queue.t  746
-rw-r--r--  db/perl/BerkeleyDB/t/recno.t  877
-rw-r--r--  db/perl/BerkeleyDB/t/strict.t  172
-rw-r--r--  db/perl/BerkeleyDB/t/subdb.t  242
-rw-r--r--  db/perl/BerkeleyDB/t/txn.t  306
-rw-r--r--  db/perl/BerkeleyDB/t/unknown.t  176
-rw-r--r--  db/perl/BerkeleyDB/t/util.pm  180
-rw-r--r--  db/perl/BerkeleyDB/typemap  275
-rw-r--r--  db/perl/DB_File/Changes  368
-rw-r--r--  db/perl/DB_File/DB_File.pm  2274
-rw-r--r--  db/perl/DB_File/DB_File.xs  2122
-rw-r--r--  db/perl/DB_File/DB_File_BS  6
-rw-r--r--  db/perl/DB_File/MANIFEST  27
-rw-r--r--  db/perl/DB_File/Makefile.PL  196
-rw-r--r--  db/perl/DB_File/README  396
-rw-r--r--  db/perl/DB_File/config.in  99
-rw-r--r--  db/perl/DB_File/dbinfo  109
-rw-r--r--  db/perl/DB_File/hints/dynixptx.pl  3
-rw-r--r--  db/perl/DB_File/hints/sco.pl  2
-rw-r--r--  db/perl/DB_File/patches/5.004  44
-rw-r--r--  db/perl/DB_File/patches/5.004_01  217
-rw-r--r--  db/perl/DB_File/patches/5.004_02  217
-rw-r--r--  db/perl/DB_File/patches/5.004_03  223
-rw-r--r--  db/perl/DB_File/patches/5.004_04  209
-rw-r--r--  db/perl/DB_File/patches/5.004_05  209
-rw-r--r--  db/perl/DB_File/patches/5.005  209
-rw-r--r--  db/perl/DB_File/patches/5.005_01  209
-rw-r--r--  db/perl/DB_File/patches/5.005_02  264
-rw-r--r--  db/perl/DB_File/patches/5.005_03  250
-rw-r--r--  db/perl/DB_File/patches/5.6.0  294
-rw-r--r--  db/perl/DB_File/t/db-btree.t  1307
-rw-r--r--  db/perl/DB_File/t/db-hash.t  756
-rw-r--r--  db/perl/DB_File/t/db-recno.t  1254
-rw-r--r--  db/perl/DB_File/typemap  44
-rw-r--r--  db/perl/DB_File/version.c  82
-rw-r--r--  db/rep/rep_method.c  432
-rw-r--r--  db/rep/rep_record.c  1010
-rw-r--r--  db/rep/rep_region.c  147
-rw-r--r--  db/rep/rep_util.c  623
-rw-r--r--  db/tcl/docs/rep.html  50
-rw-r--r--  db/test/dead006.tcl  16
-rw-r--r--  db/test/dead007.tcl  35
-rw-r--r--  db/test/env010.tcl  49
-rw-r--r--  db/test/lock004.tcl  49
-rw-r--r--  db/test/parallel.tcl  214
-rw-r--r--  db/test/recd15scr.tcl  74
-rw-r--r--  db/test/rep001.tcl  214
-rw-r--r--  db/test/reputils.tcl  104
-rw-r--r--  db/test/rsrc004.tcl  52
-rw-r--r--  db/test/scr001/chk.code  37
-rw-r--r--  db/test/scr002/chk.def  64
-rw-r--r--  db/test/scr003/chk.define  76
-rw-r--r--  db/test/scr004/chk.javafiles  31
-rw-r--r--  db/test/scr005/chk.nl  100
-rw-r--r--  db/test/scr006/chk.offt  35
-rw-r--r--  db/test/scr007/chk.proto  44
-rw-r--r--  db/test/scr008/chk.pubdef  179
-rw-r--r--  db/test/scr009/chk.srcfiles  36
-rw-r--r--  db/test/scr010/chk.str  31
-rw-r--r--  db/test/scr010/spell.ok  723
-rw-r--r--  db/test/scr011/chk.tags  41
-rw-r--r--  db/test/scr012/chk.vx_code  62
-rw-r--r--  db/test/scr013/chk.stats  114
-rw-r--r--  db/test/scr014/chk.err  34
-rw-r--r--  db/test/scr015/README  36
-rw-r--r--  db/test/scr015/TestConstruct01.cpp  331
-rw-r--r--  db/test/scr015/TestConstruct01.testerr  4
-rw-r--r--  db/test/scr015/TestConstruct01.testout  27
-rw-r--r--  db/test/scr015/TestExceptInclude.cpp  25
-rw-r--r--  db/test/scr015/TestGetSetMethods.cpp  91
-rw-r--r--  db/test/scr015/TestKeyRange.cpp  171
-rw-r--r--  db/test/scr015/TestKeyRange.testin  8
-rw-r--r--  db/test/scr015/TestKeyRange.testout  19
-rw-r--r--  db/test/scr015/TestLogc.cpp  92
-rw-r--r--  db/test/scr015/TestLogc.testout  18
-rw-r--r--  db/test/scr015/TestSimpleAccess.cpp  67
-rw-r--r--  db/test/scr015/TestSimpleAccess.testout  3
-rw-r--r--  db/test/scr015/TestTruncate.cpp  84
-rw-r--r--  db/test/scr015/TestTruncate.testout  6
-rw-r--r--  db/test/scr015/chk.cxxtests  69
-rw-r--r--  db/test/scr015/ignore  4
-rw-r--r--  db/test/scr015/testall  32
-rw-r--r--  db/test/scr015/testone  121
-rw-r--r--  db/test/scr016/CallbackTest.java  83
-rw-r--r--  db/test/scr016/CallbackTest.testout  56
-rw-r--r--  db/test/scr016/README  37
-rw-r--r--  db/test/scr016/TestAppendRecno.java  259
-rw-r--r--  db/test/scr016/TestAppendRecno.testout  82
-rw-r--r--  db/test/scr016/TestAssociate.java  333
-rw-r--r--  db/test/scr016/TestAssociate.testout  30
-rw-r--r--  db/test/scr016/TestClosedDb.java  62
-rw-r--r--  db/test/scr016/TestClosedDb.testout  2
-rw-r--r--  db/test/scr016/TestConstruct01.java  474
-rw-r--r--  db/test/scr016/TestConstruct01.testerr  0
-rw-r--r--  db/test/scr016/TestConstruct01.testout  3
-rw-r--r--  db/test/scr016/TestConstruct02.java  367
-rw-r--r--  db/test/scr016/TestConstruct02.testout  3
-rw-r--r--  db/test/scr016/TestDbtFlags.java  237
-rw-r--r--  db/test/scr016/TestDbtFlags.testerr  36
-rw-r--r--  db/test/scr016/TestDbtFlags.testout  48
-rw-r--r--  db/test/scr016/TestGetSetMethods.java  99
-rw-r--r--  db/test/scr016/TestKeyRange.java  203
-rw-r--r--  db/test/scr016/TestKeyRange.testout  27
-rw-r--r--  db/test/scr016/TestLockVec.java  249
-rw-r--r--  db/test/scr016/TestLockVec.testout  8
-rw-r--r--  db/test/scr016/TestLogc.java  90
-rw-r--r--  db/test/scr016/TestLogc.testout  18
-rw-r--r--  db/test/scr016/TestOpenEmpty.java  189
-rw-r--r--  db/test/scr016/TestOpenEmpty.testerr  2
-rw-r--r--  db/test/scr016/TestReplication.java  289
-rw-r--r--  db/test/scr016/TestRpcServer.java  193
-rw-r--r--  db/test/scr016/TestSameDbt.java  56
-rw-r--r--  db/test/scr016/TestSameDbt.testout  2
-rw-r--r--  db/test/scr016/TestSimpleAccess.java  70
-rw-r--r--  db/test/scr016/TestSimpleAccess.testout  3
-rw-r--r--  db/test/scr016/TestTruncate.java  87
-rw-r--r--  db/test/scr016/TestTruncate.testout  6
-rw-r--r--  db/test/scr016/chk.javatests  78
-rw-r--r--  db/test/scr016/ignore  11
-rw-r--r--  db/test/scr016/testall  32
-rw-r--r--  db/test/scr016/testone  122
-rw-r--r--  db/test/scr017/O.BH  196
-rw-r--r--  db/test/scr017/O.R  196
-rw-r--r--  db/test/scr017/chk.db185  26
-rw-r--r--  db/test/scr017/t.c  188
-rw-r--r--  db/test/shelltest.tcl  83
-rw-r--r--  db/txn/txn_method.c  125
361 files changed, 82766 insertions, 0 deletions
diff --git a/db/build_vxworks/BerkeleyDB/Makefile.component b/db/build_vxworks/BerkeleyDB/Makefile.component
new file mode 100644
index 000000000..5b2492388
--- /dev/null
+++ b/db/build_vxworks/BerkeleyDB/Makefile.component
@@ -0,0 +1,2060 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = BerkeleyDB
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = PENTIUM2
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ getopt.o \
+ snprintf.o \
+ strcasecmp.o \
+ vsnprintf.o \
+ db_byteorder.o \
+ db_err.o \
+ db_getlong.o \
+ db_log2.o \
+ util_log.o \
+ util_sig.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_iface.o \
+ db_join.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ db_salloc.o \
+ db_shash.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ client.o \
+ db_server_clnt.o \
+ gen_client.o \
+ gen_client_ret.o \
+ db_server_xdr.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o \
+ compConfig.o \
+ util_arg.o
+COMPONENT_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ snprintf.o \
+ strcasecmp.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ util_arg.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+DEPENDENCY_FILES = bt_compare.d \
+ bt_conv.d \
+ bt_curadj.d \
+ bt_cursor.d \
+ bt_delete.d \
+ bt_method.d \
+ bt_open.d \
+ bt_put.d \
+ bt_rec.d \
+ bt_reclaim.d \
+ bt_recno.d \
+ bt_rsearch.d \
+ bt_search.d \
+ bt_split.d \
+ bt_stat.d \
+ bt_upgrade.d \
+ bt_verify.d \
+ btree_auto.d \
+ getopt.d \
+ snprintf.d \
+ strcasecmp.d \
+ vsnprintf.d \
+ db_byteorder.d \
+ db_err.d \
+ db_getlong.d \
+ db_log2.d \
+ util_log.d \
+ util_sig.d \
+ crdel_auto.d \
+ crdel_rec.d \
+ db.d \
+ db_am.d \
+ db_auto.d \
+ db_cam.d \
+ db_conv.d \
+ db_dispatch.d \
+ db_dup.d \
+ db_iface.d \
+ db_join.d \
+ db_meta.d \
+ db_method.d \
+ db_overflow.d \
+ db_pr.d \
+ db_rec.d \
+ db_reclaim.d \
+ db_ret.d \
+ db_upg.d \
+ db_upg_opd.d \
+ db_vrfy.d \
+ db_vrfyutil.d \
+ db_salloc.d \
+ db_shash.d \
+ env_method.d \
+ env_open.d \
+ env_recover.d \
+ env_region.d \
+ hash.d \
+ hash_auto.d \
+ hash_conv.d \
+ hash_dup.d \
+ hash_func.d \
+ hash_meta.d \
+ hash_method.d \
+ hash_page.d \
+ hash_rec.d \
+ hash_reclaim.d \
+ hash_stat.d \
+ hash_upgrade.d \
+ hash_verify.d \
+ hsearch.d \
+ lock.d \
+ lock_deadlock.d \
+ lock_method.d \
+ lock_region.d \
+ lock_stat.d \
+ lock_util.d \
+ log.d \
+ log_archive.d \
+ log_auto.d \
+ log_compare.d \
+ log_findckp.d \
+ log_get.d \
+ log_method.d \
+ log_put.d \
+ log_rec.d \
+ log_register.d \
+ mp_alloc.d \
+ mp_bh.d \
+ mp_fget.d \
+ mp_fopen.d \
+ mp_fput.d \
+ mp_fset.d \
+ mp_method.d \
+ mp_region.d \
+ mp_register.d \
+ mp_stat.d \
+ mp_sync.d \
+ mp_trickle.d \
+ mut_tas.d \
+ mutex.d \
+ os_alloc.d \
+ os_clock.d \
+ os_dir.d \
+ os_errno.d \
+ os_fid.d \
+ os_fsync.d \
+ os_handle.d \
+ os_method.d \
+ os_oflags.d \
+ os_open.d \
+ os_region.d \
+ os_rename.d \
+ os_root.d \
+ os_rpath.d \
+ os_rw.d \
+ os_seek.d \
+ os_sleep.d \
+ os_spin.d \
+ os_stat.d \
+ os_tmpdir.d \
+ os_unlink.d \
+ os_vx_abs.d \
+ os_vx_finit.d \
+ os_vx_map.d \
+ qam.d \
+ qam_auto.d \
+ qam_conv.d \
+ qam_files.d \
+ qam_method.d \
+ qam_open.d \
+ qam_rec.d \
+ qam_stat.d \
+ qam_upgrade.d \
+ qam_verify.d \
+ rep_method.d \
+ rep_record.d \
+ rep_region.d \
+ rep_util.d \
+ client.d \
+ db_server_clnt.d \
+ gen_client.d \
+ gen_client_ret.d \
+ db_server_xdr.d \
+ txn.d \
+ txn_auto.d \
+ txn_method.d \
+ txn_rec.d \
+ txn_recover.d \
+ txn_region.d \
+ txn_stat.d \
+ xa.d \
+ xa_db.d \
+ xa_map.d \
+ compConfig.d \
+ util_arg.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ getopt.o \
+ snprintf.o \
+ strcasecmp.o \
+ vsnprintf.o \
+ db_byteorder.o \
+ db_err.o \
+ db_getlong.o \
+ db_log2.o \
+ util_log.o \
+ util_sig.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_iface.o \
+ db_join.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ db_salloc.o \
+ db_shash.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ client.o \
+ db_server_clnt.o \
+ gen_client.o \
+ gen_client_ret.o \
+ db_server_xdr.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o \
+ compConfig.o \
+ util_arg.o
+COMPONENT_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ snprintf.o \
+ strcasecmp.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ util_arg.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+DEPENDENCY_FILES = bt_compare.d \
+ bt_conv.d \
+ bt_curadj.d \
+ bt_cursor.d \
+ bt_delete.d \
+ bt_method.d \
+ bt_open.d \
+ bt_put.d \
+ bt_rec.d \
+ bt_reclaim.d \
+ bt_recno.d \
+ bt_rsearch.d \
+ bt_search.d \
+ bt_split.d \
+ bt_stat.d \
+ bt_upgrade.d \
+ bt_verify.d \
+ btree_auto.d \
+ getopt.d \
+ snprintf.d \
+ strcasecmp.d \
+ vsnprintf.d \
+ db_byteorder.d \
+ db_err.d \
+ db_getlong.d \
+ db_log2.d \
+ util_log.d \
+ util_sig.d \
+ crdel_auto.d \
+ crdel_rec.d \
+ db.d \
+ db_am.d \
+ db_auto.d \
+ db_cam.d \
+ db_conv.d \
+ db_dispatch.d \
+ db_dup.d \
+ db_iface.d \
+ db_join.d \
+ db_meta.d \
+ db_method.d \
+ db_overflow.d \
+ db_pr.d \
+ db_rec.d \
+ db_reclaim.d \
+ db_ret.d \
+ db_upg.d \
+ db_upg_opd.d \
+ db_vrfy.d \
+ db_vrfyutil.d \
+ db_salloc.d \
+ db_shash.d \
+ env_method.d \
+ env_open.d \
+ env_recover.d \
+ env_region.d \
+ hash.d \
+ hash_auto.d \
+ hash_conv.d \
+ hash_dup.d \
+ hash_func.d \
+ hash_meta.d \
+ hash_method.d \
+ hash_page.d \
+ hash_rec.d \
+ hash_reclaim.d \
+ hash_stat.d \
+ hash_upgrade.d \
+ hash_verify.d \
+ hsearch.d \
+ lock.d \
+ lock_deadlock.d \
+ lock_method.d \
+ lock_region.d \
+ lock_stat.d \
+ lock_util.d \
+ log.d \
+ log_archive.d \
+ log_auto.d \
+ log_compare.d \
+ log_findckp.d \
+ log_get.d \
+ log_method.d \
+ log_put.d \
+ log_rec.d \
+ log_register.d \
+ mp_alloc.d \
+ mp_bh.d \
+ mp_fget.d \
+ mp_fopen.d \
+ mp_fput.d \
+ mp_fset.d \
+ mp_method.d \
+ mp_region.d \
+ mp_register.d \
+ mp_stat.d \
+ mp_sync.d \
+ mp_trickle.d \
+ mut_tas.d \
+ mutex.d \
+ os_alloc.d \
+ os_clock.d \
+ os_dir.d \
+ os_errno.d \
+ os_fid.d \
+ os_fsync.d \
+ os_handle.d \
+ os_method.d \
+ os_oflags.d \
+ os_open.d \
+ os_region.d \
+ os_rename.d \
+ os_root.d \
+ os_rpath.d \
+ os_rw.d \
+ os_seek.d \
+ os_sleep.d \
+ os_spin.d \
+ os_stat.d \
+ os_tmpdir.d \
+ os_unlink.d \
+ os_vx_abs.d \
+ os_vx_finit.d \
+ os_vx_map.d \
+ qam.d \
+ qam_auto.d \
+ qam_conv.d \
+ qam_files.d \
+ qam_method.d \
+ qam_open.d \
+ qam_rec.d \
+ qam_stat.d \
+ qam_upgrade.d \
+ qam_verify.d \
+ rep_method.d \
+ rep_record.d \
+ rep_region.d \
+ rep_util.d \
+ client.d \
+ db_server_clnt.d \
+ gen_client.d \
+ gen_client_ret.d \
+ db_server_xdr.d \
+ txn.d \
+ txn_auto.d \
+ txn_method.d \
+ txn_rec.d \
+ txn_recover.d \
+ txn_region.d \
+ txn_stat.d \
+ xa.d \
+ xa_db.d \
+ xa_map.d \
+ compConfig.d \
+ util_arg.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ getopt.o \
+ snprintf.o \
+ strcasecmp.o \
+ vsnprintf.o \
+ db_byteorder.o \
+ db_err.o \
+ db_getlong.o \
+ db_log2.o \
+ util_log.o \
+ util_sig.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_iface.o \
+ db_join.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ db_salloc.o \
+ db_shash.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ client.o \
+ db_server_clnt.o \
+ gen_client.o \
+ gen_client_ret.o \
+ db_server_xdr.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o \
+ compConfig.o \
+ util_arg.o
+COMPONENT_OBJS = bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ snprintf.o \
+ strcasecmp.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ util_arg.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+DEPENDENCY_FILES = bt_compare.d \
+ bt_conv.d \
+ bt_curadj.d \
+ bt_cursor.d \
+ bt_delete.d \
+ bt_method.d \
+ bt_open.d \
+ bt_put.d \
+ bt_rec.d \
+ bt_reclaim.d \
+ bt_recno.d \
+ bt_rsearch.d \
+ bt_search.d \
+ bt_split.d \
+ bt_stat.d \
+ bt_upgrade.d \
+ bt_verify.d \
+ btree_auto.d \
+ getopt.d \
+ snprintf.d \
+ strcasecmp.d \
+ vsnprintf.d \
+ db_byteorder.d \
+ db_err.d \
+ db_getlong.d \
+ db_log2.d \
+ util_log.d \
+ util_sig.d \
+ crdel_auto.d \
+ crdel_rec.d \
+ db.d \
+ db_am.d \
+ db_auto.d \
+ db_cam.d \
+ db_conv.d \
+ db_dispatch.d \
+ db_dup.d \
+ db_iface.d \
+ db_join.d \
+ db_meta.d \
+ db_method.d \
+ db_overflow.d \
+ db_pr.d \
+ db_rec.d \
+ db_reclaim.d \
+ db_ret.d \
+ db_upg.d \
+ db_upg_opd.d \
+ db_vrfy.d \
+ db_vrfyutil.d \
+ db_salloc.d \
+ db_shash.d \
+ env_method.d \
+ env_open.d \
+ env_recover.d \
+ env_region.d \
+ hash.d \
+ hash_auto.d \
+ hash_conv.d \
+ hash_dup.d \
+ hash_func.d \
+ hash_meta.d \
+ hash_method.d \
+ hash_page.d \
+ hash_rec.d \
+ hash_reclaim.d \
+ hash_stat.d \
+ hash_upgrade.d \
+ hash_verify.d \
+ hsearch.d \
+ lock.d \
+ lock_deadlock.d \
+ lock_method.d \
+ lock_region.d \
+ lock_stat.d \
+ lock_util.d \
+ log.d \
+ log_archive.d \
+ log_auto.d \
+ log_compare.d \
+ log_findckp.d \
+ log_get.d \
+ log_method.d \
+ log_put.d \
+ log_rec.d \
+ log_register.d \
+ mp_alloc.d \
+ mp_bh.d \
+ mp_fget.d \
+ mp_fopen.d \
+ mp_fput.d \
+ mp_fset.d \
+ mp_method.d \
+ mp_region.d \
+ mp_register.d \
+ mp_stat.d \
+ mp_sync.d \
+ mp_trickle.d \
+ mut_tas.d \
+ mutex.d \
+ os_alloc.d \
+ os_clock.d \
+ os_dir.d \
+ os_errno.d \
+ os_fid.d \
+ os_fsync.d \
+ os_handle.d \
+ os_method.d \
+ os_oflags.d \
+ os_open.d \
+ os_region.d \
+ os_rename.d \
+ os_root.d \
+ os_rpath.d \
+ os_rw.d \
+ os_seek.d \
+ os_sleep.d \
+ os_spin.d \
+ os_stat.d \
+ os_tmpdir.d \
+ os_unlink.d \
+ os_vx_abs.d \
+ os_vx_finit.d \
+ os_vx_map.d \
+ qam.d \
+ qam_auto.d \
+ qam_conv.d \
+ qam_files.d \
+ qam_method.d \
+ qam_open.d \
+ qam_rec.d \
+ qam_stat.d \
+ qam_upgrade.d \
+ qam_verify.d \
+ rep_method.d \
+ rep_record.d \
+ rep_region.d \
+ rep_util.d \
+ client.d \
+ db_server_clnt.d \
+ gen_client.d \
+ gen_client_ret.d \
+ db_server_xdr.d \
+ txn.d \
+ txn_auto.d \
+ txn_method.d \
+ txn_rec.d \
+ txn_recover.d \
+ txn_region.d \
+ txn_stat.d \
+ xa.d \
+ xa_db.d \
+ xa_map.d \
+ compConfig.d \
+ util_arg.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/../../txn/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../txn/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../log/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../log/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../mp/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../mp/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../btree/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../btree/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../common/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../common/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../os/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../os/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../mutex/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../mutex/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_client/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_client/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_server/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_server/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../env/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../env/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../hsearch/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../hsearch/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../clib/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../clib/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rep/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rep/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../qam/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../qam/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../db/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../db/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../hash/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../hash/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../os_vxworks/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../os_vxworks/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../xa/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../xa/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../lock/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../lock/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/../../txn/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../txn/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../log/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../log/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../mp/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../mp/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../btree/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../btree/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../common/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../common/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../os/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../os/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../mutex/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../mutex/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_client/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_client/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_server/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_server/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../env/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../env/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../hsearch/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../hsearch/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../clib/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../clib/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../rep/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../rep/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../qam/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../qam/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../db/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../db/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../hash/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../hash/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../os_vxworks/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../os_vxworks/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../xa/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../xa/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+%.o : $(PRJ_DIR)/../../lock/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC -c $<
+
+%.d : $(PRJ_DIR)/../../lock/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -DDEBUG -DDIAGNOSTIC $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/../../txn/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../txn/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../log/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../log/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../mp/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../mp/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../btree/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../btree/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../common/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../common/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../os/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../os/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../mutex/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../mutex/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_client/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_client/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../rpc_server/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../rpc_server/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../env/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../env/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../hsearch/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../hsearch/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../clib/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../clib/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../rep/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../rep/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../qam/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../qam/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../db/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../db/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../hash/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../hash/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../os_vxworks/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../os_vxworks/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../xa/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../xa/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+%.o : $(PRJ_DIR)/../../lock/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto -c $<
+
+%.d : $(PRJ_DIR)/../../lock/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
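The rule blocks above are generated mechanically: for each build specification ('PENTIUMgnu.debug', 'PENTIUM2gnu.debug', 'PENTIUM2gnu.release') the project manager writes out one %.o/%.d pattern-rule pair per Berkeley DB source directory, and the pairs differ only in the CPU selection (-mcpu/-march and -DCPU) and in the debug (-g -DDEBUG -DDIAGNOSTIC) versus release (-O2) options. A condensed sketch of the common shape, using the hypothetical variables DBSRC and SPEC_CFLAGS that are not defined anywhere in this project:

    # Illustration only; the generated Makefile spells this out literally,
    # once per source directory and per build specification.
    DBSRC       = $(PRJ_DIR)/../../btree
    SPEC_CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD \
                  -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall \
                  -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2 -I$(PRJ_DIR)/.. \
                  -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto \
                  -DDEBUG -DDIAGNOSTIC

    %.o : $(DBSRC)/%.c
    	$(CC) $(SPEC_CFLAGS) -MD -c $<                # compile one module

    %.d : $(DBSRC)/%.c
    	ccpentium -E -P -M $(SPEC_CFLAGS) $< > $@     # preprocessor-generated dependency file
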
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+BerkeleyDB.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all BerkeleyDB modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > BerkeleyDB_ctdt.c
+ $(COMPILE_TRADITIONAL) BerkeleyDB_ctdt.c -o BerkeleyDB_ctdt.o
+ $(LD) -r -o BerkeleyDB.tmp $@ BerkeleyDB_ctdt.o
+ $(RM) $@
+ $(MV) BerkeleyDB.tmp $@
+#
+# Adds entry point table section to BerkeleyDB component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
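The BerkeleyDB.cm recipe above is the standard Tornado component link sequence: partially link every project object, pipe the nm output through the munch tool to generate a small C file (BerkeleyDB_ctdt.c) carrying the _vxMain/_vxExit glue referred to in the comment, compile and relink that file into the module, and finally append the entry point table section. The same sequence for a hypothetical component Foo.cm, where LD, NM, MUNCH, COMPILE_TRADITIONAL, RM, MV and PD_EPT_DDE_ADD are assumed to be supplied by the standard component build rules this Makefile includes:

    Foo.cm: $(PRJ_OBJS) mxrDoc.nm depend
    	$(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)          # partial link of all objects
    	$(NM) $@ | $(MUNCH) > Foo_ctdt.c              # munch symbols into init/exit glue
    	$(COMPILE_TRADITIONAL) Foo_ctdt.c -o Foo_ctdt.o
    	$(LD) -r -o Foo.tmp $@ Foo_ctdt.o             # relink with the generated glue
    	$(RM) $@
    	$(MV) Foo.tmp $@
    	$(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
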
+
+#
+# Partial link build rules
+# Partially link all BerkeleyDB modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+BerkeleyDB.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+BerkeleyDB.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+ $(NM) bt_compare.o bt_conv.o bt_curadj.o bt_cursor.o bt_delete.o bt_method.o bt_open.o bt_put.o bt_rec.o bt_reclaim.o bt_recno.o bt_rsearch.o bt_search.o bt_split.o bt_stat.o bt_upgrade.o bt_verify.o btree_auto.o client.o crdel_auto.o crdel_rec.o db.o db_am.o db_auto.o db_byteorder.o db_cam.o db_conv.o db_dispatch.o db_dup.o db_err.o db_getlong.o db_iface.o db_join.o db_log2.o db_meta.o db_method.o db_overflow.o db_pr.o db_rec.o db_reclaim.o db_ret.o db_salloc.o db_server_clnt.o db_server_xdr.o db_shash.o db_upg.o db_upg_opd.o db_vrfy.o db_vrfyutil.o env_method.o env_open.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(NM) env_recover.o env_region.o gen_client.o gen_client_ret.o getopt.o hash.o hash_auto.o hash_conv.o hash_dup.o hash_func.o hash_meta.o hash_method.o hash_page.o hash_rec.o hash_reclaim.o hash_stat.o hash_upgrade.o hash_verify.o hsearch.o lock.o lock_deadlock.o lock_method.o lock_region.o lock_stat.o lock_util.o log.o log_archive.o log_auto.o log_compare.o log_findckp.o log_get.o log_method.o log_put.o log_rec.o log_register.o mp_alloc.o mp_bh.o mp_fget.o mp_fopen.o mp_fput.o mp_fset.o mp_method.o mp_region.o mp_register.o mp_stat.o mp_sync.o mp_trickle.o mut_tas.o mutex.o os_alloc.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(NM) os_clock.o os_dir.o os_errno.o os_fid.o os_fsync.o os_handle.o os_method.o os_oflags.o os_open.o os_region.o os_rename.o os_root.o os_rpath.o os_rw.o os_seek.o os_sleep.o os_spin.o os_stat.o os_tmpdir.o os_unlink.o os_vx_abs.o os_vx_finit.o os_vx_map.o qam.o qam_auto.o qam_conv.o qam_files.o qam_method.o qam_open.o qam_rec.o qam_stat.o qam_upgrade.o qam_verify.o rep_method.o rep_record.o rep_region.o rep_util.o snprintf.o strcasecmp.o txn.o txn_auto.o txn_method.o txn_rec.o txn_recover.o txn_region.o txn_stat.o util_arg.o util_log.o util_sig.o vsnprintf.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(NM) xa.o xa_db.o xa_map.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) bt_compare.o bt_conv.o bt_curadj.o bt_cursor.o bt_delete.o bt_method.o bt_open.o bt_put.o bt_rec.o bt_reclaim.o bt_recno.o bt_rsearch.o bt_search.o bt_split.o bt_stat.o bt_upgrade.o bt_verify.o btree_auto.o client.o crdel_auto.o crdel_rec.o db.o db_am.o db_auto.o db_byteorder.o db_cam.o db_conv.o db_dispatch.o db_dup.o db_err.o db_getlong.o db_iface.o db_join.o db_log2.o db_meta.o db_method.o db_overflow.o db_pr.o db_rec.o db_reclaim.o db_ret.o db_salloc.o db_server_clnt.o db_server_xdr.o db_shash.o db_upg.o db_upg_opd.o db_vrfy.o db_vrfyutil.o env_method.o env_open.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(SIZE) env_recover.o env_region.o gen_client.o gen_client_ret.o getopt.o hash.o hash_auto.o hash_conv.o hash_dup.o hash_func.o hash_meta.o hash_method.o hash_page.o hash_rec.o hash_reclaim.o hash_stat.o hash_upgrade.o hash_verify.o hsearch.o lock.o lock_deadlock.o lock_method.o lock_region.o lock_stat.o lock_util.o log.o log_archive.o log_auto.o log_compare.o log_findckp.o log_get.o log_method.o log_put.o log_rec.o log_register.o mp_alloc.o mp_bh.o mp_fget.o mp_fopen.o mp_fput.o mp_fset.o mp_method.o mp_region.o mp_register.o mp_stat.o mp_sync.o mp_trickle.o mut_tas.o mutex.o os_alloc.o | sed 1d | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(SIZE) os_clock.o os_dir.o os_errno.o os_fid.o os_fsync.o os_handle.o os_method.o os_oflags.o os_open.o os_region.o os_rename.o os_root.o os_rpath.o os_rw.o os_seek.o os_sleep.o os_spin.o os_stat.o os_tmpdir.o os_unlink.o os_vx_abs.o os_vx_finit.o os_vx_map.o qam.o qam_auto.o qam_conv.o qam_files.o qam_method.o qam_open.o qam_rec.o qam_stat.o qam_upgrade.o qam_verify.o rep_method.o rep_record.o rep_region.o rep_util.o snprintf.o strcasecmp.o txn.o txn_auto.o txn_method.o txn_rec.o txn_recover.o txn_region.o txn_stat.o util_arg.o util_log.o util_sig.o vsnprintf.o | sed 1d | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+ $(SIZE) xa.o xa_db.o xa_map.o | sed 1d | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
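Both mxrDoc.nm and mxrDoc.size run the component objects through $(NM) and $(SIZE) in fixed batches and filter the output with sed. The two $(subst) calls above double any backslashes and escape the forward slashes in $(PRJ_DIR)/extbin/$(BUILD_SPEC)/ so that the directory can be dropped into the s|...||g expressions as a literal pattern, stripping the external-binaries prefix from object names; the additional 'sed 1d' on every size batch after the first removes the repeated column-header line so it appears only once in mxrDoc.size. A minimal sketch of the same filtering for a single placeholder object (example.o and the demo.* targets are illustrative only):

    demo.nm: example.o
    	$(NM) example.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" > $@

    demo.size: example.o
    	$(SIZE) example.o | sed 1d | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" > $@
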
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/BerkeleyDB/Makefile.custom b/db/build_vxworks/BerkeleyDB/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/BerkeleyDB/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
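The commented template above is the intended way to use this file; a filled-in rule keeps exactly the same shape. The sketch below simply uncomments that template, keeping its own placeholder path and module name (/folk/me/mySourceTree, myLibrary.o) and assuming that FORCE_EXTERNAL_MAKE, EXTERNAL_BINARIES_DIR, CP and DIRCHAR are provided by the surrounding project rules:

    EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
    EXTERNAL_MODULE      = myLibrary.o
    EXTERNAL_MAKE        = make

    $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
    	$(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
    		-f $(EXTERNAL_SOURCE_BASE)/Makefile \
    		CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
    	$(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)

As the comments note, the external module must also be added to the component itself, either through the 'Add external module' dialog or by appending it to the MODULES line in component.cdf.
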
diff --git a/db/build_vxworks/BerkeleyDB/component.cdf b/db/build_vxworks/BerkeleyDB/component.cdf
new file mode 100644
index 000000000..e383c9928
--- /dev/null
+++ b/db/build_vxworks/BerkeleyDB/component.cdf
@@ -0,0 +1,1099 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_BERKELEYDB {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES bt_compare.o \
+ bt_conv.o \
+ bt_curadj.o \
+ bt_cursor.o \
+ bt_delete.o \
+ bt_method.o \
+ bt_open.o \
+ bt_put.o \
+ bt_rec.o \
+ bt_reclaim.o \
+ bt_recno.o \
+ bt_rsearch.o \
+ bt_search.o \
+ bt_split.o \
+ bt_stat.o \
+ bt_upgrade.o \
+ bt_verify.o \
+ btree_auto.o \
+ client.o \
+ crdel_auto.o \
+ crdel_rec.o \
+ db.o \
+ db_am.o \
+ db_auto.o \
+ db_byteorder.o \
+ db_cam.o \
+ db_conv.o \
+ db_dispatch.o \
+ db_dup.o \
+ db_err.o \
+ db_getlong.o \
+ db_iface.o \
+ db_join.o \
+ db_log2.o \
+ db_meta.o \
+ db_method.o \
+ db_overflow.o \
+ db_pr.o \
+ db_rec.o \
+ db_reclaim.o \
+ db_ret.o \
+ db_salloc.o \
+ db_server_clnt.o \
+ db_server_xdr.o \
+ db_shash.o \
+ db_upg.o \
+ db_upg_opd.o \
+ db_vrfy.o \
+ db_vrfyutil.o \
+ env_method.o \
+ env_open.o \
+ env_recover.o \
+ env_region.o \
+ gen_client.o \
+ gen_client_ret.o \
+ getopt.o \
+ hash.o \
+ hash_auto.o \
+ hash_conv.o \
+ hash_dup.o \
+ hash_func.o \
+ hash_meta.o \
+ hash_method.o \
+ hash_page.o \
+ hash_rec.o \
+ hash_reclaim.o \
+ hash_stat.o \
+ hash_upgrade.o \
+ hash_verify.o \
+ hsearch.o \
+ lock.o \
+ lock_deadlock.o \
+ lock_method.o \
+ lock_region.o \
+ lock_stat.o \
+ lock_util.o \
+ log.o \
+ log_archive.o \
+ log_auto.o \
+ log_compare.o \
+ log_findckp.o \
+ log_get.o \
+ log_method.o \
+ log_put.o \
+ log_rec.o \
+ log_register.o \
+ mp_alloc.o \
+ mp_bh.o \
+ mp_fget.o \
+ mp_fopen.o \
+ mp_fput.o \
+ mp_fset.o \
+ mp_method.o \
+ mp_region.o \
+ mp_register.o \
+ mp_stat.o \
+ mp_sync.o \
+ mp_trickle.o \
+ mut_tas.o \
+ mutex.o \
+ os_alloc.o \
+ os_clock.o \
+ os_dir.o \
+ os_errno.o \
+ os_fid.o \
+ os_fsync.o \
+ os_handle.o \
+ os_method.o \
+ os_oflags.o \
+ os_open.o \
+ os_region.o \
+ os_rename.o \
+ os_root.o \
+ os_rpath.o \
+ os_rw.o \
+ os_seek.o \
+ os_sleep.o \
+ os_spin.o \
+ os_stat.o \
+ os_tmpdir.o \
+ os_unlink.o \
+ os_vx_abs.o \
+ os_vx_finit.o \
+ os_vx_map.o \
+ qam.o \
+ qam_auto.o \
+ qam_conv.o \
+ qam_files.o \
+ qam_method.o \
+ qam_open.o \
+ qam_rec.o \
+ qam_stat.o \
+ qam_upgrade.o \
+ qam_verify.o \
+ rep_method.o \
+ rep_record.o \
+ rep_region.o \
+ rep_util.o \
+ snprintf.o \
+ strcasecmp.o \
+ txn.o \
+ txn_auto.o \
+ txn_method.o \
+ txn_rec.o \
+ txn_recover.o \
+ txn_region.o \
+ txn_stat.o \
+ util_arg.o \
+ util_log.o \
+ util_sig.o \
+ vsnprintf.o \
+ xa.o \
+ xa_db.o \
+ xa_map.o
+ NAME BerkeleyDB
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module bt_compare.o {
+
+ NAME bt_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_compare.c
+}
+
+Module bt_conv.o {
+
+ NAME bt_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_conv.c
+}
+
+Module bt_curadj.o {
+
+ NAME bt_curadj.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_curadj.c
+}
+
+Module bt_cursor.o {
+
+ NAME bt_cursor.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_cursor.c
+}
+
+Module bt_delete.o {
+
+ NAME bt_delete.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_delete.c
+}
+
+Module bt_method.o {
+
+ NAME bt_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_method.c
+}
+
+Module bt_open.o {
+
+ NAME bt_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_open.c
+}
+
+Module bt_put.o {
+
+ NAME bt_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_put.c
+}
+
+Module bt_rec.o {
+
+ NAME bt_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rec.c
+}
+
+Module bt_reclaim.o {
+
+ NAME bt_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_reclaim.c
+}
+
+Module bt_recno.o {
+
+ NAME bt_recno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_recno.c
+}
+
+Module bt_rsearch.o {
+
+ NAME bt_rsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_rsearch.c
+}
+
+Module bt_search.o {
+
+ NAME bt_search.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_search.c
+}
+
+Module bt_split.o {
+
+ NAME bt_split.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_split.c
+}
+
+Module bt_stat.o {
+
+ NAME bt_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_stat.c
+}
+
+Module bt_upgrade.o {
+
+ NAME bt_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_upgrade.c
+}
+
+Module bt_verify.o {
+
+ NAME bt_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/bt_verify.c
+}
+
+Module btree_auto.o {
+
+ NAME btree_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../btree/btree_auto.c
+}
+
+Module client.o {
+
+ NAME client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/client.c
+}
+
+Module crdel_auto.o {
+
+ NAME crdel_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_auto.c
+}
+
+Module crdel_rec.o {
+
+ NAME crdel_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/crdel_rec.c
+}
+
+Module db.o {
+
+ NAME db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db.c
+}
+
+Module db_am.o {
+
+ NAME db_am.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_am.c
+}
+
+Module db_auto.o {
+
+ NAME db_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_auto.c
+}
+
+Module db_byteorder.o {
+
+ NAME db_byteorder.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_byteorder.c
+}
+
+Module db_cam.o {
+
+ NAME db_cam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_cam.c
+}
+
+Module db_conv.o {
+
+ NAME db_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_conv.c
+}
+
+Module db_dispatch.o {
+
+ NAME db_dispatch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dispatch.c
+}
+
+Module db_dup.o {
+
+ NAME db_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_dup.c
+}
+
+Module db_err.o {
+
+ NAME db_err.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_err.c
+}
+
+Module db_getlong.o {
+
+ NAME db_getlong.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_getlong.c
+}
+
+Module db_iface.o {
+
+ NAME db_iface.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_iface.c
+}
+
+Module db_join.o {
+
+ NAME db_join.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_join.c
+}
+
+Module db_log2.o {
+
+ NAME db_log2.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/db_log2.c
+}
+
+Module db_meta.o {
+
+ NAME db_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_meta.c
+}
+
+Module db_method.o {
+
+ NAME db_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_method.c
+}
+
+Module db_overflow.o {
+
+ NAME db_overflow.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_overflow.c
+}
+
+Module db_pr.o {
+
+ NAME db_pr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_pr.c
+}
+
+Module db_rec.o {
+
+ NAME db_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_rec.c
+}
+
+Module db_reclaim.o {
+
+ NAME db_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_reclaim.c
+}
+
+Module db_ret.o {
+
+ NAME db_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_ret.c
+}
+
+Module db_salloc.o {
+
+ NAME db_salloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_salloc.c
+}
+
+Module db_server_clnt.o {
+
+ NAME db_server_clnt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/db_server_clnt.c
+}
+
+Module db_server_xdr.o {
+
+ NAME db_server_xdr.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_server/db_server_xdr.c
+}
+
+Module db_shash.o {
+
+ NAME db_shash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/db_shash.c
+}
+
+Module db_upg.o {
+
+ NAME db_upg.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg.c
+}
+
+Module db_upg_opd.o {
+
+ NAME db_upg_opd.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_upg_opd.c
+}
+
+Module db_vrfy.o {
+
+ NAME db_vrfy.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfy.c
+}
+
+Module db_vrfyutil.o {
+
+ NAME db_vrfyutil.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../db/db_vrfyutil.c
+}
+
+Module env_method.o {
+
+ NAME env_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_method.c
+}
+
+Module env_open.o {
+
+ NAME env_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_open.c
+}
+
+Module env_recover.o {
+
+ NAME env_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_recover.c
+}
+
+Module env_region.o {
+
+ NAME env_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../env/env_region.c
+}
+
+Module gen_client.o {
+
+ NAME gen_client.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client.c
+}
+
+Module gen_client_ret.o {
+
+ NAME gen_client_ret.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rpc_client/gen_client_ret.c
+}
+
+Module getopt.o {
+
+ NAME getopt.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/getopt.c
+}
+
+Module hash.o {
+
+ NAME hash.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash.c
+}
+
+Module hash_auto.o {
+
+ NAME hash_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_auto.c
+}
+
+Module hash_conv.o {
+
+ NAME hash_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_conv.c
+}
+
+Module hash_dup.o {
+
+ NAME hash_dup.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_dup.c
+}
+
+Module hash_func.o {
+
+ NAME hash_func.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_func.c
+}
+
+Module hash_meta.o {
+
+ NAME hash_meta.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_meta.c
+}
+
+Module hash_method.o {
+
+ NAME hash_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_method.c
+}
+
+Module hash_page.o {
+
+ NAME hash_page.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_page.c
+}
+
+Module hash_rec.o {
+
+ NAME hash_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_rec.c
+}
+
+Module hash_reclaim.o {
+
+ NAME hash_reclaim.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_reclaim.c
+}
+
+Module hash_stat.o {
+
+ NAME hash_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_stat.c
+}
+
+Module hash_upgrade.o {
+
+ NAME hash_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_upgrade.c
+}
+
+Module hash_verify.o {
+
+ NAME hash_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hash/hash_verify.c
+}
+
+Module hsearch.o {
+
+ NAME hsearch.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../hsearch/hsearch.c
+}
+
+Module lock.o {
+
+ NAME lock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock.c
+}
+
+Module lock_deadlock.o {
+
+ NAME lock_deadlock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_deadlock.c
+}
+
+Module lock_method.o {
+
+ NAME lock_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_method.c
+}
+
+Module lock_region.o {
+
+ NAME lock_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_region.c
+}
+
+Module lock_stat.o {
+
+ NAME lock_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_stat.c
+}
+
+Module lock_util.o {
+
+ NAME lock_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../lock/lock_util.c
+}
+
+Module log.o {
+
+ NAME log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log.c
+}
+
+Module log_archive.o {
+
+ NAME log_archive.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_archive.c
+}
+
+Module log_auto.o {
+
+ NAME log_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_auto.c
+}
+
+Module log_compare.o {
+
+ NAME log_compare.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_compare.c
+}
+
+Module log_findckp.o {
+
+ NAME log_findckp.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_findckp.c
+}
+
+Module log_get.o {
+
+ NAME log_get.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_get.c
+}
+
+Module log_method.o {
+
+ NAME log_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_method.c
+}
+
+Module log_put.o {
+
+ NAME log_put.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_put.c
+}
+
+Module log_rec.o {
+
+ NAME log_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_rec.c
+}
+
+Module log_register.o {
+
+ NAME log_register.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../log/log_register.c
+}
+
+Module mp_alloc.o {
+
+ NAME mp_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_alloc.c
+}
+
+Module mp_bh.o {
+
+ NAME mp_bh.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_bh.c
+}
+
+Module mp_fget.o {
+
+ NAME mp_fget.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fget.c
+}
+
+Module mp_fopen.o {
+
+ NAME mp_fopen.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fopen.c
+}
+
+Module mp_fput.o {
+
+ NAME mp_fput.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fput.c
+}
+
+Module mp_fset.o {
+
+ NAME mp_fset.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_fset.c
+}
+
+Module mp_method.o {
+
+ NAME mp_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_method.c
+}
+
+Module mp_region.o {
+
+ NAME mp_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_region.c
+}
+
+Module mp_register.o {
+
+ NAME mp_register.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_register.c
+}
+
+Module mp_stat.o {
+
+ NAME mp_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_stat.c
+}
+
+Module mp_sync.o {
+
+ NAME mp_sync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_sync.c
+}
+
+Module mp_trickle.o {
+
+ NAME mp_trickle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mp/mp_trickle.c
+}
+
+Module mut_tas.o {
+
+ NAME mut_tas.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mut_tas.c
+}
+
+Module mutex.o {
+
+ NAME mutex.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../mutex/mutex.c
+}
+
+Module os_vx_abs.o {
+
+ NAME os_vx_abs.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c
+}
+
+Module os_alloc.o {
+
+ NAME os_alloc.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_alloc.c
+}
+
+Module os_clock.o {
+
+ NAME os_clock.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_clock.c
+}
+
+Module os_dir.o {
+
+ NAME os_dir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_dir.c
+}
+
+Module os_errno.o {
+
+ NAME os_errno.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_errno.c
+}
+
+Module os_fid.o {
+
+ NAME os_fid.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fid.c
+}
+
+Module os_vx_finit.o {
+
+ NAME os_vx_finit.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_finit.c
+}
+
+Module os_fsync.o {
+
+ NAME os_fsync.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_fsync.c
+}
+
+Module os_handle.o {
+
+ NAME os_handle.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_handle.c
+}
+
+Module os_vx_map.o {
+
+ NAME os_vx_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os_vxworks/os_vx_map.c
+}
+
+Module os_method.o {
+
+ NAME os_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_method.c
+}
+
+Module os_oflags.o {
+
+ NAME os_oflags.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_oflags.c
+}
+
+Module os_open.o {
+
+ NAME os_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_open.c
+}
+
+Module os_region.o {
+
+ NAME os_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_region.c
+}
+
+Module os_rename.o {
+
+ NAME os_rename.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rename.c
+}
+
+Module os_root.o {
+
+ NAME os_root.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_root.c
+}
+
+Module os_rpath.o {
+
+ NAME os_rpath.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rpath.c
+}
+
+Module os_rw.o {
+
+ NAME os_rw.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_rw.c
+}
+
+Module os_seek.o {
+
+ NAME os_seek.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_seek.c
+}
+
+Module os_sleep.o {
+
+ NAME os_sleep.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_sleep.c
+}
+
+Module os_spin.o {
+
+ NAME os_spin.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_spin.c
+}
+
+Module os_stat.o {
+
+ NAME os_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_stat.c
+}
+
+Module os_tmpdir.o {
+
+ NAME os_tmpdir.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_tmpdir.c
+}
+
+Module os_unlink.o {
+
+ NAME os_unlink.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../os/os_unlink.c
+}
+
+Module qam.o {
+
+ NAME qam.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam.c
+}
+
+Module qam_auto.o {
+
+ NAME qam_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_auto.c
+}
+
+Module qam_conv.o {
+
+ NAME qam_conv.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_conv.c
+}
+
+Module qam_files.o {
+
+ NAME qam_files.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_files.c
+}
+
+Module qam_method.o {
+
+ NAME qam_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_method.c
+}
+
+Module qam_open.o {
+
+ NAME qam_open.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_open.c
+}
+
+Module qam_rec.o {
+
+ NAME qam_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_rec.c
+}
+
+Module qam_stat.o {
+
+ NAME qam_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_stat.c
+}
+
+Module qam_upgrade.o {
+
+ NAME qam_upgrade.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_upgrade.c
+}
+
+Module qam_verify.o {
+
+ NAME qam_verify.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../qam/qam_verify.c
+}
+
+Module rep_method.o {
+
+ NAME rep_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_method.c
+}
+
+Module rep_record.o {
+
+ NAME rep_record.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_record.c
+}
+
+Module rep_region.o {
+
+ NAME rep_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_region.c
+}
+
+Module rep_util.o {
+
+ NAME rep_util.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../rep/rep_util.c
+}
+
+Module snprintf.o {
+
+ NAME snprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/snprintf.c
+}
+
+Module strcasecmp.o {
+
+ NAME strcasecmp.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/strcasecmp.c
+}
+
+Module txn.o {
+
+ NAME txn.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn.c
+}
+
+Module txn_auto.o {
+
+ NAME txn_auto.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_auto.c
+}
+
+Module txn_method.o {
+
+ NAME txn_method.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_method.c
+}
+
+Module txn_rec.o {
+
+ NAME txn_rec.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_rec.c
+}
+
+Module txn_recover.o {
+
+ NAME txn_recover.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_recover.c
+}
+
+Module txn_region.o {
+
+ NAME txn_region.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_region.c
+}
+
+Module txn_stat.o {
+
+ NAME txn_stat.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../txn/txn_stat.c
+}
+
+Module util_log.o {
+
+ NAME util_log.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_log.c
+}
+
+Module util_sig.o {
+
+ NAME util_sig.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_sig.c
+}
+
+Module vsnprintf.o {
+
+ NAME vsnprintf.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../clib/vsnprintf.c
+}
+
+Module xa.o {
+
+ NAME xa.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa.c
+}
+
+Module xa_db.o {
+
+ NAME xa_db.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_db.c
+}
+
+Module xa_map.o {
+
+ NAME xa_map.o
+ SRC_PATH_NAME $(PRJ_DIR)/../../xa/xa_map.c
+}
+
+Module util_arg.o {
+ SRC_PATH_NAME $(PRJ_DIR)/../../common/util_arg.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/BerkeleyDB/component.wpj b/db/build_vxworks/BerkeleyDB/component.wpj
new file mode 100644
index 000000000..4e416d156
--- /dev/null
+++ b/db/build_vxworks/BerkeleyDB/component.wpj
@@ -0,0 +1,6139 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.0
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_objects
+log_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_objects
+log_findckp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_objects
+log_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_objects
+log_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_objects
+os_vx_finit.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_auto.c_objects
+log_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_findckp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_findckp.c_objects
+log_findckp.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_findckp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_rec.c_objects
+log_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_register.c_objects
+log_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../log/log_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_objects
+os_vx_finit.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2 \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_objects
+bt_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_objects
+bt_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_objects
+bt_curadj.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_curadj.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_objects
+bt_cursor.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_cursor.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_objects
+bt_delete.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_delete.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_objects
+bt_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_objects
+bt_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_objects
+bt_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_objects
+bt_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_objects
+bt_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_objects
+bt_recno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_recno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_objects
+bt_rsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_rsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_objects
+bt_search.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_search.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_objects
+bt_split.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_split.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_objects
+bt_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_objects
+bt_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_objects
+bt_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/bt_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_objects
+btree_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../btree/btree_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_objects
+getopt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/getopt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_objects
+snprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/snprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_objects
+strcasecmp.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/strcasecmp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_objects
+vsnprintf.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../clib/vsnprintf.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_objects
+db_byteorder.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_byteorder.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_objects
+db_err.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_err.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_objects
+db_getlong.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_getlong.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_objects
+db_log2.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/db_log2.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_objects
+util_arg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_arg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_objects
+util_log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_objects
+util_sig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../common/util_sig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_objects
+crdel_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_objects
+crdel_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/crdel_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_objects
+db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_objects
+db_am.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_am.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_objects
+db_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_objects
+db_cam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_cam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_objects
+db_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_objects
+db_dispatch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dispatch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_objects
+db_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_objects
+db_iface.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_iface.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_objects
+db_join.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_join.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_objects
+db_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_objects
+db_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_objects
+db_overflow.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_overflow.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_objects
+db_pr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_pr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_objects
+db_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_objects
+db_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_objects
+db_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_objects
+db_upg.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_objects
+db_upg_opd.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_upg_opd.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_objects
+db_vrfy.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfy.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_objects
+db_vrfyutil.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../db/db_vrfyutil.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_objects
+db_salloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_salloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_objects
+db_shash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/db_shash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_objects
+env_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_objects
+env_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_objects
+env_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_objects
+env_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../env/env_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_objects
+hash.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_objects
+hash_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_objects
+hash_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_objects
+hash_dup.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_dup.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_objects
+hash_func.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_func.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_objects
+hash_meta.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_meta.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_objects
+hash_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_objects
+hash_page.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_page.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_objects
+hash_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_objects
+hash_reclaim.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_reclaim.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_objects
+hash_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_objects
+hash_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_objects
+hash_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hash/hash_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_objects
+hsearch.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../hsearch/hsearch.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_objects
+lock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_objects
+lock_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_objects
+lock_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_objects
+lock_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_objects
+lock_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_objects
+lock_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../lock/lock_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_objects
+log.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_objects
+log_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_objects
+log_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_objects
+log_compare.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_compare.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_objects
+log_findckp.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_findckp.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_objects
+log_get.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_get.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_objects
+log_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_objects
+log_put.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_put.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_objects
+log_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_objects
+log_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../log/log_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_objects
+mp_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_objects
+mp_bh.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_bh.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_objects
+mp_fget.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fget.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_objects
+mp_fopen.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fopen.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_objects
+mp_fput.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fput.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_objects
+mp_fset.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_fset.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_objects
+mp_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_objects
+mp_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_objects
+mp_register.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_register.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_objects
+mp_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_objects
+mp_sync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_sync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_objects
+mp_trickle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mp/mp_trickle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_objects
+mut_tas.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mut_tas.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_objects
+mutex.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../mutex/mutex.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_objects
+os_alloc.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_alloc.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_objects
+os_clock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_clock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_objects
+os_dir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_dir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_objects
+os_errno.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_errno.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_objects
+os_fid.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fid.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_objects
+os_fsync.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_fsync.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_objects
+os_handle.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_handle.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_objects
+os_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_objects
+os_oflags.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_oflags.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_objects
+os_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_objects
+os_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_objects
+os_rename.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rename.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_objects
+os_root.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_root.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_objects
+os_rpath.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rpath.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_objects
+os_rw.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_rw.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_objects
+os_seek.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_seek.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_objects
+os_sleep.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_sleep.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_objects
+os_spin.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_spin.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_objects
+os_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_objects
+os_tmpdir.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_tmpdir.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_objects
+os_unlink.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os/os_unlink.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_objects
+os_vx_abs.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_abs.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_objects
+os_vx_finit.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_finit.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_objects
+os_vx_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../os_vxworks/os_vx_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_objects
+qam.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_objects
+qam_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_objects
+qam_conv.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_conv.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_objects
+qam_files.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_files.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_objects
+qam_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_objects
+qam_open.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_open.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_objects
+qam_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_objects
+qam_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_objects
+qam_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_objects
+qam_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../qam/qam_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_objects
+rep_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_objects
+rep_record.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_record.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_objects
+rep_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_objects
+rep_util.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rep/rep_util.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_objects
+client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_objects
+db_server_clnt.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/db_server_clnt.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_objects
+gen_client.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_objects
+gen_client_ret.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_client/gen_client_ret.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_objects
+db_server_xdr.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../rpc_server/db_server_xdr.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_objects
+txn.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_objects
+txn_auto.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_auto.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_objects
+txn_method.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_method.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_objects
+txn_rec.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_rec.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_objects
+txn_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_objects
+txn_region.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_region.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_objects
+txn_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../txn/txn_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_objects
+xa.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_objects
+xa_db.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_db.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_objects
+xa_map.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../../xa/xa_map.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -DDEBUG \
+ -DDIAGNOSTIC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu.debug PENTIUM2gnu.debug PENTIUM2gnu.release
+<END>
+
+<BEGIN> COMPONENT_COM_TYPE
+
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../../btree/bt_compare.c \
+ $(PRJ_DIR)/../../btree/bt_conv.c \
+ $(PRJ_DIR)/../../btree/bt_curadj.c \
+ $(PRJ_DIR)/../../btree/bt_cursor.c \
+ $(PRJ_DIR)/../../btree/bt_delete.c \
+ $(PRJ_DIR)/../../btree/bt_method.c \
+ $(PRJ_DIR)/../../btree/bt_open.c \
+ $(PRJ_DIR)/../../btree/bt_put.c \
+ $(PRJ_DIR)/../../btree/bt_rec.c \
+ $(PRJ_DIR)/../../btree/bt_reclaim.c \
+ $(PRJ_DIR)/../../btree/bt_recno.c \
+ $(PRJ_DIR)/../../btree/bt_rsearch.c \
+ $(PRJ_DIR)/../../btree/bt_search.c \
+ $(PRJ_DIR)/../../btree/bt_split.c \
+ $(PRJ_DIR)/../../btree/bt_stat.c \
+ $(PRJ_DIR)/../../btree/bt_upgrade.c \
+ $(PRJ_DIR)/../../btree/bt_verify.c \
+ $(PRJ_DIR)/../../btree/btree_auto.c \
+ $(PRJ_DIR)/../../clib/getopt.c \
+ $(PRJ_DIR)/../../clib/snprintf.c \
+ $(PRJ_DIR)/../../clib/strcasecmp.c \
+ $(PRJ_DIR)/../../clib/vsnprintf.c \
+ $(PRJ_DIR)/../../common/db_byteorder.c \
+ $(PRJ_DIR)/../../common/db_err.c \
+ $(PRJ_DIR)/../../common/db_getlong.c \
+ $(PRJ_DIR)/../../common/db_log2.c \
+ $(PRJ_DIR)/../../common/util_log.c \
+ $(PRJ_DIR)/../../common/util_sig.c \
+ $(PRJ_DIR)/../../db/crdel_auto.c \
+ $(PRJ_DIR)/../../db/crdel_rec.c \
+ $(PRJ_DIR)/../../db/db.c \
+ $(PRJ_DIR)/../../db/db_am.c \
+ $(PRJ_DIR)/../../db/db_auto.c \
+ $(PRJ_DIR)/../../db/db_cam.c \
+ $(PRJ_DIR)/../../db/db_conv.c \
+ $(PRJ_DIR)/../../db/db_dispatch.c \
+ $(PRJ_DIR)/../../db/db_dup.c \
+ $(PRJ_DIR)/../../db/db_iface.c \
+ $(PRJ_DIR)/../../db/db_join.c \
+ $(PRJ_DIR)/../../db/db_meta.c \
+ $(PRJ_DIR)/../../db/db_method.c \
+ $(PRJ_DIR)/../../db/db_overflow.c \
+ $(PRJ_DIR)/../../db/db_pr.c \
+ $(PRJ_DIR)/../../db/db_rec.c \
+ $(PRJ_DIR)/../../db/db_reclaim.c \
+ $(PRJ_DIR)/../../db/db_ret.c \
+ $(PRJ_DIR)/../../db/db_upg.c \
+ $(PRJ_DIR)/../../db/db_upg_opd.c \
+ $(PRJ_DIR)/../../db/db_vrfy.c \
+ $(PRJ_DIR)/../../db/db_vrfyutil.c \
+ $(PRJ_DIR)/../../env/db_salloc.c \
+ $(PRJ_DIR)/../../env/db_shash.c \
+ $(PRJ_DIR)/../../env/env_method.c \
+ $(PRJ_DIR)/../../env/env_open.c \
+ $(PRJ_DIR)/../../env/env_recover.c \
+ $(PRJ_DIR)/../../env/env_region.c \
+ $(PRJ_DIR)/../../hash/hash.c \
+ $(PRJ_DIR)/../../hash/hash_auto.c \
+ $(PRJ_DIR)/../../hash/hash_conv.c \
+ $(PRJ_DIR)/../../hash/hash_dup.c \
+ $(PRJ_DIR)/../../hash/hash_func.c \
+ $(PRJ_DIR)/../../hash/hash_meta.c \
+ $(PRJ_DIR)/../../hash/hash_method.c \
+ $(PRJ_DIR)/../../hash/hash_page.c \
+ $(PRJ_DIR)/../../hash/hash_rec.c \
+ $(PRJ_DIR)/../../hash/hash_reclaim.c \
+ $(PRJ_DIR)/../../hash/hash_stat.c \
+ $(PRJ_DIR)/../../hash/hash_upgrade.c \
+ $(PRJ_DIR)/../../hash/hash_verify.c \
+ $(PRJ_DIR)/../../hsearch/hsearch.c \
+ $(PRJ_DIR)/../../lock/lock.c \
+ $(PRJ_DIR)/../../lock/lock_deadlock.c \
+ $(PRJ_DIR)/../../lock/lock_method.c \
+ $(PRJ_DIR)/../../lock/lock_region.c \
+ $(PRJ_DIR)/../../lock/lock_stat.c \
+ $(PRJ_DIR)/../../lock/lock_util.c \
+ $(PRJ_DIR)/../../log/log.c \
+ $(PRJ_DIR)/../../log/log_archive.c \
+ $(PRJ_DIR)/../../log/log_auto.c \
+ $(PRJ_DIR)/../../log/log_compare.c \
+ $(PRJ_DIR)/../../log/log_findckp.c \
+ $(PRJ_DIR)/../../log/log_get.c \
+ $(PRJ_DIR)/../../log/log_method.c \
+ $(PRJ_DIR)/../../log/log_put.c \
+ $(PRJ_DIR)/../../log/log_rec.c \
+ $(PRJ_DIR)/../../log/log_register.c \
+ $(PRJ_DIR)/../../mp/mp_alloc.c \
+ $(PRJ_DIR)/../../mp/mp_bh.c \
+ $(PRJ_DIR)/../../mp/mp_fget.c \
+ $(PRJ_DIR)/../../mp/mp_fopen.c \
+ $(PRJ_DIR)/../../mp/mp_fput.c \
+ $(PRJ_DIR)/../../mp/mp_fset.c \
+ $(PRJ_DIR)/../../mp/mp_method.c \
+ $(PRJ_DIR)/../../mp/mp_region.c \
+ $(PRJ_DIR)/../../mp/mp_register.c \
+ $(PRJ_DIR)/../../mp/mp_stat.c \
+ $(PRJ_DIR)/../../mp/mp_sync.c \
+ $(PRJ_DIR)/../../mp/mp_trickle.c \
+ $(PRJ_DIR)/../../mutex/mut_tas.c \
+ $(PRJ_DIR)/../../mutex/mutex.c \
+ $(PRJ_DIR)/../../os/os_alloc.c \
+ $(PRJ_DIR)/../../os/os_clock.c \
+ $(PRJ_DIR)/../../os/os_dir.c \
+ $(PRJ_DIR)/../../os/os_errno.c \
+ $(PRJ_DIR)/../../os/os_fid.c \
+ $(PRJ_DIR)/../../os/os_fsync.c \
+ $(PRJ_DIR)/../../os/os_handle.c \
+ $(PRJ_DIR)/../../os/os_method.c \
+ $(PRJ_DIR)/../../os/os_oflags.c \
+ $(PRJ_DIR)/../../os/os_open.c \
+ $(PRJ_DIR)/../../os/os_region.c \
+ $(PRJ_DIR)/../../os/os_rename.c \
+ $(PRJ_DIR)/../../os/os_root.c \
+ $(PRJ_DIR)/../../os/os_rpath.c \
+ $(PRJ_DIR)/../../os/os_rw.c \
+ $(PRJ_DIR)/../../os/os_seek.c \
+ $(PRJ_DIR)/../../os/os_sleep.c \
+ $(PRJ_DIR)/../../os/os_spin.c \
+ $(PRJ_DIR)/../../os/os_stat.c \
+ $(PRJ_DIR)/../../os/os_tmpdir.c \
+ $(PRJ_DIR)/../../os/os_unlink.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_abs.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_finit.c \
+ $(PRJ_DIR)/../../os_vxworks/os_vx_map.c \
+ $(PRJ_DIR)/../../qam/qam.c \
+ $(PRJ_DIR)/../../qam/qam_auto.c \
+ $(PRJ_DIR)/../../qam/qam_conv.c \
+ $(PRJ_DIR)/../../qam/qam_files.c \
+ $(PRJ_DIR)/../../qam/qam_method.c \
+ $(PRJ_DIR)/../../qam/qam_open.c \
+ $(PRJ_DIR)/../../qam/qam_rec.c \
+ $(PRJ_DIR)/../../qam/qam_stat.c \
+ $(PRJ_DIR)/../../qam/qam_upgrade.c \
+ $(PRJ_DIR)/../../qam/qam_verify.c \
+ $(PRJ_DIR)/../../rep/rep_method.c \
+ $(PRJ_DIR)/../../rep/rep_record.c \
+ $(PRJ_DIR)/../../rep/rep_region.c \
+ $(PRJ_DIR)/../../rep/rep_util.c \
+ $(PRJ_DIR)/../../rpc_client/client.c \
+ $(PRJ_DIR)/../../rpc_client/db_server_clnt.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client.c \
+ $(PRJ_DIR)/../../rpc_client/gen_client_ret.c \
+ $(PRJ_DIR)/../../rpc_server/db_server_xdr.c \
+ $(PRJ_DIR)/../../txn/txn.c \
+ $(PRJ_DIR)/../../txn/txn_auto.c \
+ $(PRJ_DIR)/../../txn/txn_method.c \
+ $(PRJ_DIR)/../../txn/txn_rec.c \
+ $(PRJ_DIR)/../../txn/txn_recover.c \
+ $(PRJ_DIR)/../../txn/txn_region.c \
+ $(PRJ_DIR)/../../txn/txn_stat.c \
+ $(PRJ_DIR)/../../xa/xa.c \
+ $(PRJ_DIR)/../../xa/xa_db.c \
+ $(PRJ_DIR)/../../xa/xa_map.c \
+ $(PRJ_DIR)/compConfig.c \
+ $(PRJ_DIR)/../../common/util_arg.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUMgnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
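A note on the record format, since the block above makes up the bulk of this file: the .wpj project files added by this patch are flat databases in which every entry opens with a "<BEGIN> key" line and runs to the matching "<END>", so the per-source-file _infoTags, _objects and _toolMacro entries, the BUILD_*_MACRO_* toolchain settings and the PROJECT_FILES list are all independent key/value records. As a reading aid only, and not part of the patch, the standalone C sketch below prints the value stored under one key; the program name (wpjkey) and its two command-line arguments are assumptions made purely for illustration.

/*
 * Illustrative sketch, not part of the patch: print the value lines of a
 * single "<BEGIN> key ... <END>" record from a .wpj project file, for
 * example the PROJECT_FILES list or a BUILD_*_MACRO_CFLAGS entry.
 */
#include <stdio.h>
#include <string.h>

int
main(int argc, char *argv[])
{
	FILE *fp;
	char line[1024];
	int in_record;

	if (argc != 3) {
		fprintf(stderr, "usage: wpjkey file.wpj key\n");
		return (1);
	}
	if ((fp = fopen(argv[1], "r")) == NULL) {
		perror(argv[1]);
		return (1);
	}
	in_record = 0;
	while (fgets(line, sizeof(line), fp) != NULL) {
		line[strcspn(line, "\r\n")] = '\0';	/* strip the newline */
		if (!in_record) {
			/* A record opens with "<BEGIN> " followed by its key. */
			if (strncmp(line, "<BEGIN> ", 8) == 0 &&
			    strcmp(line + 8, argv[2]) == 0)
				in_record = 1;
		} else if (strcmp(line, "<END>") == 0)
			break;			/* Records close with "<END>". */
		else
			printf("%s\n", line);
	}
	(void)fclose(fp);
	return (in_record ? 0 : 1);
}

Run with the key PROJECT_FILES against the project file above, this would be expected to reproduce the source list from which the component's object files are derived.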
diff --git a/db/build_vxworks/db_archive/db_archive.c b/db/build_vxworks/db_archive/db_archive.c
new file mode 100644
index 000000000..6247280b8
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive.c
@@ -0,0 +1,181 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_archive.c,v 11.26 2001/08/06 13:30:41 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+#include "clib_ext.h"
+
+int db_archive_main __P((int, char *[]));
+int db_archive_usage __P((void));
+int db_archive_version_check __P((const char *));
+
+int
+db_archive(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_archive", args, &argc, &argv);
+ return (db_archive_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_archive_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_archive";
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, ret, verbose;
+ char **file, *home, **list;
+
+ if ((ret = db_archive_version_check(progname)) != 0)
+ return (ret);
+
+ flags = 0;
+ e_close = exitval = verbose = 0;
+ home = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ah:lsVv")) != EOF)
+ switch (ch) {
+ case 'a':
+ LF_SET(DB_ARCH_ABS);
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ LF_SET(DB_ARCH_LOG);
+ break;
+ case 's':
+ LF_SET(DB_ARCH_DATA);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_archive_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_archive_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose)
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home, DB_CREATE |
+ DB_INIT_LOG | DB_INIT_TXN | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Get the list of names. */
+ if ((ret = dbenv->log_archive(dbenv, &list, flags)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_archive");
+ goto shutdown;
+ }
+
+ /* Print the list of names. */
+ if (list != NULL) {
+ for (file = list; *file != NULL; ++file)
+ printf("%s\n", *file);
+ __os_free(dbenv, list, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_archive_usage()
+{
+ (void)fprintf(stderr, "usage: db_archive [-alsVv] [-h home]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_archive_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
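One detail of db_archive.c that is easy to miss: unlike a command-line build, the exported db_archive() wrapper takes the entire option string as a single argument and uses __db_util_arg() to split it into an argc/argv pair before calling db_archive_main(), so the utility is driven with one string once the module is loaded. The fragment below is a usage sketch under that reading; the environment home /vxdata/TESTDIR and the caller function are invented for illustration.

/*
 * Usage sketch only; /vxdata/TESTDIR is a made-up environment home.
 * The wrapper returns EXIT_SUCCESS or EXIT_FAILURE, mirroring the exit
 * status of the command-line utility.
 */
#include <stdlib.h>

extern int db_archive(char *);

int
example_archive_call(void)
{
	/* Writable buffer, in case the option string is tokenized in place. */
	char args[] = "-a -h /vxdata/TESTDIR";

	return (db_archive(args) == EXIT_SUCCESS ? 0 : -1);
}

The -a and -h options here are the same DB_ARCH_ABS and environment-home flags handled by the getopt loop above.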
diff --git a/db/build_vxworks/db_archive/db_archive.wpj b/db/build_vxworks/db_archive/db_archive.wpj
new file mode 100644
index 000000000..c714c83d7
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_archive.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_archive.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_archive.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_archive.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_archive.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> FILE_db_archive.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_archive.c
+<END>
+
+<BEGIN> userComments
+db_archive
+<END>
diff --git a/db/build_vxworks/db_archive/db_archive/Makefile.component b/db/build_vxworks/db_archive/db_archive/Makefile.component
new file mode 100644
index 000000000..d178ce455
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_archive
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_archive.o \
+ compConfig.o
+COMPONENT_OBJS = db_archive.o
+DEPENDENCY_FILES = db_archive.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_archive.o \
+ compConfig.o
+COMPONENT_OBJS = db_archive.o
+DEPENDENCY_FILES = db_archive.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_archive.o \
+ compConfig.o
+COMPONENT_OBJS = db_archive.o
+DEPENDENCY_FILES = db_archive.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_archive.o \
+ compConfig.o
+COMPONENT_OBJS = db_archive.o
+DEPENDENCY_FILES = db_archive.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_archive.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_archive modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+	$(NM) $@ | $(MUNCH) > db_archive_ctdt.c
+ $(COMPILE_TRADITIONAL) db_archive_ctdt.c -o db_archive_ctdt.o
+ $(LD) -r -o db_archive.tmp $@ db_archive_ctdt.o
+ $(RM) $@
+ $(MV) db_archive.tmp $@
+#
+# Adds entry point table section to db_archive component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_archive modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_archive.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_archive.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_archive.o: >> $@
+ $(NM) db_archive.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_archive.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_archive/db_archive/Makefile.custom b/db/build_vxworks/db_archive/db_archive/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_archive/db_archive/component.cdf b/db/build_vxworks/db_archive/db_archive/component.cdf
new file mode 100644
index 000000000..cf88762cb
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_ARCHIVE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_archive.o
+ NAME db_archive
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_archive.o {
+
+ NAME db_archive.o
+ SRC_PATH_NAME $PRJ_DIR/../db_archive.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_archive/db_archive/component.wpj b/db/build_vxworks/db_archive/db_archive/component.wpj
new file mode 100644
index 000000000..3ed6d0503
--- /dev/null
+++ b/db/build_vxworks/db_archive/db_archive/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_objects
+db_archive.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_archive.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_archive.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint.c b/db/build_vxworks/db_checkpoint/db_checkpoint.c
new file mode 100644
index 000000000..1f4eb5b54
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint.c
@@ -0,0 +1,253 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_checkpoint.c,v 11.34 2001/10/04 12:44:24 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "common_ext.h"
+#include "clib_ext.h"
+
+int db_checkpoint_main __P((int, char *[]));
+int db_checkpoint_usage __P((void));
+int db_checkpoint_version_check __P((const char *));
+
+int
+db_checkpoint(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_checkpoint", args, &argc, &argv);
+ return (db_checkpoint_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
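/*
 * The db_checkpoint() wrapper above takes its whole command line as one
 * string and relies on __db_util_arg() to split it into an argc/argv
 * pair before calling db_checkpoint_main().  The sketch below is a
 * self-contained illustration of that splitting step only; split_args()
 * is a hypothetical helper (it ignores quoting) and is not the DB
 * implementation.
 */
#include <stdlib.h>
#include <string.h>

static int
split_args(const char *progname, char *args, int *argcp, char ***argvp)
{
	char **argv, *p;
	int argc;

	/* Worst case: every other character starts a new argument. */
	if ((argv = malloc((strlen(args) / 2 + 3) * sizeof(*argv))) == NULL)
		return (-1);

	argc = 0;
	argv[argc++] = (char *)progname;
	for (p = strtok(args, " \t"); p != NULL; p = strtok(NULL, " \t"))
		argv[argc++] = p;
	argv[argc] = NULL;

	*argcp = argc;
	*argvp = argv;
	return (0);
}
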
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_checkpoint_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DB_ENV *dbenv;
+ const char *progname = "db_checkpoint";
+ time_t now;
+ long argval;
+ u_int32_t flags, kbytes, minutes, seconds;
+ int ch, e_close, exitval, once, ret, verbose;
+ char *home, *logfile;
+
+ if ((ret = db_checkpoint_version_check(progname)) != 0)
+ return (ret);
+
+ /*
+ * !!!
+	 * Don't allow a fully unsigned 32-bit number; some compilers get
+ * upset and require it to be specified in hexadecimal and so on.
+ */
+#define MAX_UINT32_T 2147483647
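	/*
	 * Illustrative aside (not from the original source): the bound
	 * matters because the -k and -p handlers below hand optarg to
	 * __db_getlong(), whose result is a signed long; the full
	 * unsigned 32-bit maximum, 4294967295, is not representable in
	 * a 32-bit long, so the limit is kept at 2^31 - 1.
	 */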
+
+ kbytes = minutes = 0;
+ e_close = exitval = once = verbose = 0;
+ flags = 0;
+ home = logfile = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "1h:k:L:p:Vv")) != EOF)
+ switch (ch) {
+ case '1':
+ once = 1;
+ flags = DB_FORCE;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ kbytes = argval;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 'p':
+ if (__db_getlong(NULL, progname,
+ optarg, 1, (long)MAX_UINT32_T, &argval))
+ return (EXIT_FAILURE);
+ minutes = argval;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_checkpoint_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_checkpoint_usage());
+
+ if (once == 0 && kbytes == 0 && minutes == 0) {
+ (void)fprintf(stderr,
+ "%s: at least one of -1, -k and -p must be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ /* Initialize the environment. */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Register the standard pgin/pgout functions, in case we do I/O. */
+ if ((ret = dbenv->memp_register(
+ dbenv, DB_FTYPE_SET, __db_pgin, __db_pgout)) != 0) {
+ dbenv->err(dbenv, ret,
+ "DB_ENV->memp_register: failed to register access method functions");
+ goto shutdown;
+ }
+
+ /*
+ * If we have only a time delay, then we'll sleep the right amount
+ * to wake up when a checkpoint is necessary. If we have a "kbytes"
+ * field set, then we'll check every 30 seconds.
+ */
+ seconds = kbytes != 0 ? 30 : minutes * 60;
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "checkpoint: %s", ctime(&now));
+ }
+
+ ret = dbenv->txn_checkpoint(dbenv, kbytes, minutes, flags);
+ while (ret == DB_INCOMPLETE) {
+ if (verbose)
+ dbenv->errx(dbenv,
+ "checkpoint did not finish, retrying\n");
+ (void)__os_sleep(dbenv, 2, 0);
+ ret = dbenv->txn_checkpoint(dbenv, 0, 0, flags);
+ }
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "txn_checkpoint");
+ goto shutdown;
+ }
+
+ if (once)
+ break;
+
+ (void)__os_sleep(dbenv, seconds, 0);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_checkpoint_usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_checkpoint [-1Vv] [-h home] [-k kbytes] [-L file] [-p min]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_checkpoint_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
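The checkpoint loop above has two behaviours worth noting: when -k is given the tool wakes every 30 seconds rather than every "minutes" minutes, and a DB_INCOMPLETE return from txn_checkpoint() is retried after a two-second pause until the checkpoint finishes. The following is a minimal, compilable sketch of that poll-and-retry skeleton under stated assumptions: run_checkpoint() and INCOMPLETE are stand-ins invented for the example, not part of the Berkeley DB API, and plain sleep() replaces __os_sleep().

#include <stdio.h>
#include <unistd.h>

#define INCOMPLETE	1	/* stand-in for DB_INCOMPLETE */

/* Hypothetical stand-in for DB_ENV->txn_checkpoint(). */
static int
run_checkpoint(void)
{
	static int pending = 2;	/* pretend the first two calls are partial */

	return (pending-- > 0 ? INCOMPLETE : 0);
}

int
main(void)
{
	unsigned int kbytes = 10, minutes = 5, seconds;
	int once = 1, ret;

	/* With -k, poll every 30 seconds; otherwise every "minutes" minutes. */
	seconds = kbytes != 0 ? 30 : minutes * 60;

	for (;;) {
		while ((ret = run_checkpoint()) == INCOMPLETE) {
			fprintf(stderr, "checkpoint did not finish, retrying\n");
			sleep(2);
		}
		if (ret != 0)
			return (1);
		if (once)
			break;
		sleep(seconds);
	}
	printf("checkpoint complete; poll interval was %u seconds\n", seconds);
	return (0);
}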
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint.wpj b/db/build_vxworks/db_checkpoint/db_checkpoint.wpj
new file mode 100644
index 000000000..0a6bba7a1
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_checkpoint.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_checkpoint.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_checkpoint.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> FILE_db_checkpoint.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_checkpoint.c
+<END>
+
+<BEGIN> userComments
+db_checkpoint
+<END>
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.component b/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.component
new file mode 100644
index 000000000..6b04036f2
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_checkpoint
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_checkpoint.o \
+ compConfig.o
+COMPONENT_OBJS = db_checkpoint.o
+DEPENDENCY_FILES = db_checkpoint.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_checkpoint.o \
+ compConfig.o
+COMPONENT_OBJS = db_checkpoint.o
+DEPENDENCY_FILES = db_checkpoint.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_checkpoint.o \
+ compConfig.o
+COMPONENT_OBJS = db_checkpoint.o
+DEPENDENCY_FILES = db_checkpoint.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_checkpoint.o \
+ compConfig.o
+COMPONENT_OBJS = db_checkpoint.o
+DEPENDENCY_FILES = db_checkpoint.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_checkpoint.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_checkpoint modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_checkpoint.c
+ $(COMPILE_TRADITIONAL) db_checkpoint_ctdt.c -o db_checkpoint_ctdt.o
+ $(LD) -r -o db_checkpoint.tmp $@ db_checkpoint_ctdt.o
+ $(RM) $@
+ $(MV) db_checkpoint.tmp $@
+#
+# Adds entry point table section to db_checkpoint component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_checkpoint modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_checkpoint.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_checkpoint.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_checkpoint.o: >> $@
+ $(NM) db_checkpoint.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_checkpoint.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom b/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint/component.cdf b/db/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
new file mode 100644
index 000000000..ea05c3a61
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_CHECKPOINT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_checkpoint.o
+ NAME db_checkpoint
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_checkpoint.o {
+
+ NAME db_checkpoint.o
+ SRC_PATH_NAME $PRJ_DIR/../db_checkpoint.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_checkpoint/db_checkpoint/component.wpj b/db/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
new file mode 100644
index 000000000..d32327e43
--- /dev/null
+++ b/db/build_vxworks/db_checkpoint/db_checkpoint/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_objects
+db_checkpoint.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_checkpoint.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_checkpoint.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_deadlock/db_deadlock.c b/db/build_vxworks/db_deadlock/db_deadlock.c
new file mode 100644
index 000000000..f4a99097e
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock.c
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_deadlock.c,v 11.33 2001/10/04 21:15:27 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "clib_ext.h"
+
+int db_deadlock_main __P((int, char *[]));
+int db_deadlock_usage __P((void));
+int db_deadlock_version_check __P((const char *));
+
+int
+db_deadlock(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_deadlock", args, &argc, &argv);
+ return (db_deadlock_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_deadlock_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_deadlock";
+ DB_ENV *dbenv;
+ u_int32_t atype;
+ time_t now;
+ long secs, usecs;
+ int ch, e_close, exitval, ret, verbose;
+ char *home, *logfile, *str;
+
+ if ((ret = db_deadlock_version_check(progname)) != 0)
+ return (ret);
+
+ atype = DB_LOCK_DEFAULT;
+ home = logfile = NULL;
+ secs = usecs = 0;
+ e_close = exitval = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "a:h:L:t:Vvw")) != EOF)
+ switch (ch) {
+ case 'a':
+ switch (optarg[0]) {
+ case 'e':
+ atype = DB_LOCK_EXPIRE;
+ break;
+ case 'm':
+ atype = DB_LOCK_MAXLOCKS;
+ break;
+ case 'n':
+ atype = DB_LOCK_MINLOCKS;
+ break;
+ case 'o':
+ atype = DB_LOCK_OLDEST;
+ break;
+ case 'w':
+ atype = DB_LOCK_MINWRITE;
+ break;
+ case 'y':
+ atype = DB_LOCK_YOUNGEST;
+ break;
+ default:
+ return (db_deadlock_usage());
+ /* NOTREACHED */
+ }
+ if (optarg[1] != '\0')
+ return (db_deadlock_usage());
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'L':
+ logfile = optarg;
+ break;
+ case 't':
+ if ((str = strchr(optarg, '.')) != NULL) {
+ *str++ = '\0';
+ if (*str != '\0' && __db_getlong(
+ NULL, progname, str, 0, LONG_MAX, &usecs))
+ return (EXIT_FAILURE);
+ }
+ if (*optarg != '\0' && __db_getlong(
+ NULL, progname, optarg, 0, LONG_MAX, &secs))
+ return (EXIT_FAILURE);
+ if (secs == 0 && usecs == 0)
+ return (db_deadlock_usage());
+
+ break;
+
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case 'w': /* Undocumented. */
+ /* Detect every 100ms (100000 us) when polling. */
+ secs = 0;
+ usecs = 100000;
+ break;
+ case '?':
+ default:
+ return (db_deadlock_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_deadlock_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /* Log our process ID. */
+ if (logfile != NULL && __db_util_logset(progname, logfile))
+ goto shutdown;
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_DEADLOCK, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_WAITSFOR, 1);
+ }
+
+ /* An environment is required. */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ while (!__db_util_interrupted()) {
+ if (verbose) {
+ (void)time(&now);
+ dbenv->errx(dbenv, "running at %.24s", ctime(&now));
+ }
+
+ if ((ret = dbenv->lock_detect(dbenv, 0, atype, NULL)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->lock_detect");
+ goto shutdown;
+ }
+
+ /* Make a pass every "secs" secs and "usecs" usecs. */
+ if (secs == 0 && usecs == 0)
+ break;
+ (void)__os_sleep(dbenv, secs, usecs);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the logfile. */
+ if (logfile != NULL)
+ remove(logfile);
+
+ /* Clean up the environment. */
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_deadlock_usage()
+{
+ (void)fprintf(stderr,
+"usage: db_deadlock [-Vv]\n\t[-a e | m | n | o | w | y] [-h home] [-L file] [-t sec.usec]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_deadlock_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
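
For context, the detection pass in db_deadlock_main()'s loop above reduces to a handful of DB_ENV calls. A minimal standalone sketch of just that pass, assuming the public <db.h> header and an already-existing environment (the "/tmp/dbhome" home path and the run_one_detect_pass name are hypothetical examples; error handling is abbreviated):

/*
 * Illustrative sketch only: one deadlock-detection pass against an
 * existing environment, using the same DB_ENV calls the utility above
 * drives in its loop.  "/tmp/dbhome" is a made-up home directory.
 */
#include <stdio.h>
#include <db.h>

int
run_one_detect_pass(void)
{
	DB_ENV *dbenv;
	int ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0) {
		fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
		return (1);
	}
	dbenv->set_errfile(dbenv, stderr);
	dbenv->set_errpfx(dbenv, "detect-sketch");

	/* Join the existing environment, as db_deadlock does. */
	if ((ret = dbenv->open(dbenv, "/tmp/dbhome",
	    DB_JOINENV | DB_USE_ENVIRON, 0)) != 0)
		dbenv->err(dbenv, ret, "open");
	else if ((ret =
	    dbenv->lock_detect(dbenv, 0, DB_LOCK_DEFAULT, NULL)) != 0)
		dbenv->err(dbenv, ret, "DB_ENV->lock_detect");

	(void)dbenv->close(dbenv, 0);
	return (ret == 0 ? 0 : 1);
}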
diff --git a/db/build_vxworks/db_deadlock/db_deadlock.wpj b/db/build_vxworks/db_deadlock/db_deadlock.wpj
new file mode 100644
index 000000000..5844b4f17
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_deadlock.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_deadlock.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_deadlock.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_deadlock.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> FILE_db_deadlock.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_deadlock.c
+<END>
+
+<BEGIN> userComments
+db_deadlock
+<END>
diff --git a/db/build_vxworks/db_deadlock/db_deadlock/Makefile.component b/db/build_vxworks/db_deadlock/db_deadlock/Makefile.component
new file mode 100644
index 000000000..803a941b9
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_deadlock
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_deadlock.o \
+ compConfig.o
+COMPONENT_OBJS = db_deadlock.o
+DEPENDENCY_FILES = db_deadlock.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_deadlock.o \
+ compConfig.o
+COMPONENT_OBJS = db_deadlock.o
+DEPENDENCY_FILES = db_deadlock.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_deadlock.o \
+ compConfig.o
+COMPONENT_OBJS = db_deadlock.o
+DEPENDENCY_FILES = db_deadlock.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_deadlock.o \
+ compConfig.o
+COMPONENT_OBJS = db_deadlock.o
+DEPENDENCY_FILES = db_deadlock.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_deadlock.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_deadlock modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+	$(NM) $@ | $(MUNCH) > db_deadlock_ctdt.c
+ $(COMPILE_TRADITIONAL) db_deadlock_ctdt.c -o db_deadlock_ctdt.o
+ $(LD) -r -o db_deadlock.tmp $@ db_deadlock_ctdt.o
+ $(RM) $@
+ $(MV) db_deadlock.tmp $@
+#
+# Adds entry point table section to db_deadlock component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_deadlock modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_deadlock.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_deadlock.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_deadlock.o: >> $@
+ $(NM) db_deadlock.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_deadlock.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_deadlock/db_deadlock/Makefile.custom b/db/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_deadlock/db_deadlock/component.cdf b/db/build_vxworks/db_deadlock/db_deadlock/component.cdf
new file mode 100644
index 000000000..efc498475
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DEADLOCK {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_deadlock.o
+ NAME db_deadlock
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_deadlock.o {
+
+ NAME db_deadlock.o
+ SRC_PATH_NAME $PRJ_DIR/../db_deadlock.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_deadlock/db_deadlock/component.wpj b/db/build_vxworks/db_deadlock/db_deadlock/component.wpj
new file mode 100644
index 000000000..5175bf9c6
--- /dev/null
+++ b/db/build_vxworks/db_deadlock/db_deadlock/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_objects
+db_deadlock.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_deadlock.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_deadlock.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_dump/db_dump.c b/db/build_vxworks/db_dump/db_dump.c
new file mode 100644
index 000000000..4d07511a5
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump.c
@@ -0,0 +1,601 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_dump.c,v 11.63 2001/10/11 22:46:26 ubell Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "btree.h"
+#include "hash.h"
+#include "lock.h"
+#include "clib_ext.h"
+
+int db_dump_db_init __P((DB_ENV *, char *, int));
+int db_dump_dump __P((DB *, int, int));
+int db_dump_dump_sub __P((DB_ENV *, DB *, char *, int, int));
+int db_dump_is_sub __P((DB *, int *));
+int db_dump_main __P((int, char *[]));
+int db_dump_show_subs __P((DB *));
+int db_dump_usage __P((void));
+int db_dump_version_check __P((const char *));
+
+int
+db_dump(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_dump", args, &argc, &argv);
+ return (db_dump_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_dump_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_dump";
+ DB_ENV *dbenv;
+ DB *dbp;
+ int ch, d_close;
+ int e_close, exitval;
+ int lflag, nflag, pflag, ret, rflag, Rflag, subs, keyflag;
+ char *dopt, *home, *subname;
+
+ if ((ret = db_dump_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ d_close = e_close = exitval = lflag = nflag = pflag = rflag = Rflag = 0;
+ keyflag = 0;
+ dopt = home = subname = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "d:f:h:klNprRs:V")) != EOF)
+ switch (ch) {
+ case 'd':
+ dopt = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "w", stdout) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'k':
+ keyflag = 1;
+ break;
+ case 'l':
+ lflag = 1;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'p':
+ pflag = 1;
+ break;
+ case 's':
+ subname = optarg;
+ break;
+ case 'R':
+ Rflag = 1;
+ /* DB_AGGRESSIVE requires DB_SALVAGE */
+ /* FALLTHROUGH */
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_dump_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_dump_usage());
+
+ if (dopt != NULL && pflag) {
+ fprintf(stderr,
+ "%s: the -d and -p options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ if (lflag && subname != NULL) {
+ fprintf(stderr,
+ "%s: the -l and -s options may not both be specified\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+
+ if (keyflag && rflag) {
+		fprintf(stderr, "%s: %s", progname,
+		    "the -k and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ if (subname != NULL && rflag) {
+		fprintf(stderr, "%s: %s", progname,
+		    "the -s and -r or -R options may not both be specified\n");
+ return (EXIT_FAILURE);
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto err;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto err;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto err;
+ }
+ }
+
+ /* Initialize the environment. */
+ if (db_dump_db_init(dbenv, home, rflag) != 0)
+ goto err;
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto err;
+ }
+ d_close = 1;
+
+ /*
+ * If we're salvaging, don't do an open; it might not be safe.
+ * Dispatch now into the salvager.
+ */
+ if (rflag) {
+ if ((ret = dbp->verify(dbp, argv[0], NULL, stdout,
+ DB_SALVAGE | (Rflag ? DB_AGGRESSIVE : 0))) != 0)
+ goto err;
+ exitval = 0;
+ goto done;
+ }
+
+ if ((ret = dbp->open(dbp,
+ argv[0], subname, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", argv[0]);
+ goto err;
+ }
+
+ if (dopt != NULL) {
+ if (__db_dump(dbp, dopt, NULL)) {
+ dbp->err(dbp, ret, "__db_dump: %s", argv[0]);
+ goto err;
+ }
+ } else if (lflag) {
+ if (db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs == 0) {
+ dbp->errx(dbp,
+ "%s: does not contain multiple databases", argv[0]);
+ goto err;
+ }
+ if (db_dump_show_subs(dbp))
+ goto err;
+ } else {
+ subs = 0;
+ if (subname == NULL && db_dump_is_sub(dbp, &subs))
+ goto err;
+ if (subs) {
+ if (db_dump_dump_sub(dbenv, dbp, argv[0], pflag, keyflag))
+ goto err;
+ } else
+ if (__db_prheader(dbp, NULL, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag))
+ goto err;
+ }
+
+ if (0) {
+err: exitval = 1;
+ }
+done: if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_dump_db_init(dbenv, home, is_salvage)
+ DB_ENV *dbenv;
+ char *home;
+ int is_salvage;
+{
+ int ret;
+
+ /*
+ * Try and use the underlying environment when opening a database.
+ * We wish to use the buffer pool so our information is as up-to-date
+ * as possible, even if the mpool cache hasn't been flushed.
+ *
+ * If we are not doing a salvage, we wish to use the DB_JOINENV flag;
+ * if a locking system is present, this will let us use it and be
+ * safe to run concurrently with other threads of control. (We never
+ * need to use transactions explicitly, as we're read-only.) Note
+ * that in CDB, too, this will configure our environment
+ * appropriately, and our cursors will (correctly) do locking as CDB
+ * read cursors.
+ *
+ * If we are doing a salvage, the verification code will protest
+ * if we initialize transactions, logging, or locking; do an
+ * explicit DB_INIT_MPOOL to try to join any existing environment
+ * before we create our own.
+ */
+ if (dbenv->open(dbenv, home,
+ DB_USE_ENVIRON | (is_salvage ? DB_INIT_MPOOL : DB_JOINENV), 0) == 0)
+ return (0);
+
+ /*
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ *
+ * Note that for many databases with a large page size, the default
+ * cache size is too small--at 64K, we can fit only four pages into
+ * the default of 256K. Because this is a utility, it's probably
+ * reasonable to grab more--real restrictive environments aren't
+ * going to run db_dump from a shell. Since we malloc a megabyte for
+ * the bulk get buffer, be conservative and use a megabyte here too.
+ */
+ if ((ret = dbenv->set_cachesize(dbenv, 0, MEGABYTE, 1)) == 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
+
+/*
+ * is_sub --
+ * Return if the database contains subdatabases.
+ */
+int
+db_dump_is_sub(dbp, yesno)
+ DB *dbp;
+ int *yesno;
+{
+ DB_BTREE_STAT *btsp;
+ DB_HASH_STAT *hsp;
+ int ret;
+
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if ((ret = dbp->stat(dbp, &btsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = btsp->bt_metaflags & BTM_SUBDB ? 1 : 0;
+ __os_free(dbp->dbenv, btsp, sizeof(DB_BTREE_STAT));
+ break;
+ case DB_HASH:
+ if ((ret = dbp->stat(dbp, &hsp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "DB->stat");
+ return (ret);
+ }
+ *yesno = hsp->hash_metaflags & DB_HASH_SUBDB ? 1 : 0;
+ __os_free(dbp->dbenv, hsp, sizeof(DB_HASH_STAT));
+ break;
+ case DB_QUEUE:
+ break;
+ default:
+ dbp->errx(dbp, "unknown database type");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump_sub --
+ * Dump out the records for a DB containing subdatabases.
+ */
+int
+db_dump_dump_sub(dbenv, parent_dbp, parent_name, pflag, keyflag)
+ DB_ENV *dbenv;
+ DB *parent_dbp;
+ char *parent_name;
+ int pflag, keyflag;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+ char *subdb;
+
+ /*
+ * Get a cursor and step through the database, dumping out each
+ * subdatabase.
+ */
+ if ((ret = parent_dbp->cursor(parent_dbp, NULL, &dbcp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ /* Nul terminate the subdatabase name. */
+ if ((subdb = malloc(key.size + 1)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ memcpy(subdb, key.data, key.size);
+ subdb[key.size] = '\0';
+
+ /* Create the DB object and open the file. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ free(subdb);
+ return (1);
+ }
+ if ((ret = dbp->open(dbp,
+ parent_name, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0)
+ dbp->err(dbp, ret,
+ "DB->open: %s:%s", parent_name, subdb);
+ if (ret == 0 &&
+ (__db_prheader(dbp, subdb, pflag, keyflag, stdout,
+ __db_verify_callback, NULL, 0) ||
+ db_dump_dump(dbp, pflag, keyflag)))
+ ret = 1;
+ (void)dbp->close(dbp, 0);
+ free(subdb);
+ if (ret != 0)
+ return (1);
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+
+ return (0);
+}
+
+/*
+ * show_subs --
+ * Display the subdatabases for a database.
+ */
+int
+db_dump_show_subs(dbp)
+ DB *dbp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ int ret;
+
+ /*
+ * Get a cursor and step through the database, printing out the key
+ * of each key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0) {
+ if ((ret = __db_prdbt(&key, 1, NULL, stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ return (1);
+ }
+ }
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ return (1);
+ }
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * dump --
+ * Dump out the records for a DB.
+ */
+int
+db_dump_dump(dbp, pflag, keyflag)
+ DB *dbp;
+ int pflag, keyflag;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DBT keyret, dataret;
+ db_recno_t recno;
+ int is_recno, failed, ret;
+ void *pointer;
+
+ /*
+ * Get a cursor and step through the database, printing out each
+ * key/data pair.
+ */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ return (1);
+ }
+
+ failed = 0;
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ data.data = malloc(1024 * 1024);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = 1024 * 1024;
+ data.flags = DB_DBT_USERMEM;
+ is_recno = (dbp->type == DB_RECNO || dbp->type == DB_QUEUE);
+ keyflag = is_recno ? keyflag : 1;
+ if (is_recno) {
+ keyret.data = &recno;
+ keyret.size = sizeof(recno);
+ }
+
+retry:
+ while ((ret =
+ dbcp->c_get(dbcp, &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
+ DB_MULTIPLE_INIT(pointer, &data);
+ for (;;) {
+ if (is_recno)
+ DB_MULTIPLE_RECNO_NEXT(pointer, &data,
+ recno, dataret.data, dataret.size);
+ else
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, keyret.data,
+ keyret.size, dataret.data, dataret.size);
+
+ if (dataret.data == NULL)
+ break;
+
+ if ((keyflag && (ret = __db_prdbt(&keyret,
+ pflag, " ", stdout, __db_verify_callback,
+ is_recno, NULL)) != 0) || (ret =
+ __db_prdbt(&dataret, pflag, " ", stdout,
+ __db_verify_callback, 0, NULL)) != 0) {
+ dbp->errx(dbp, NULL);
+ failed = 1;
+ goto err;
+ }
+ }
+ }
+ if (ret == ENOMEM) {
+ data.data = realloc(data.data, data.size);
+ if (data.data == NULL) {
+ dbp->err(dbp, ENOMEM, "bulk get buffer");
+ failed = 1;
+ goto err;
+ }
+ data.ulen = data.size;
+ goto retry;
+ }
+
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ failed = 1;
+ }
+
+err: if (data.data != NULL)
+ free(data.data);
+
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ failed = 1;
+ }
+
+ (void)__db_prfooter(stdout, __db_verify_callback);
+ return (failed);
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_dump_usage()
+{
+ (void)fprintf(stderr, "usage: %s\n",
+"db_dump [-klNprRV] [-d ahr] [-f output] [-h home] [-s database] db_file");
+ return (EXIT_FAILURE);
+}
+
+int
+db_dump_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
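
For context, the retrieval loop in db_dump_dump() above is the Berkeley DB bulk-get pattern: a DB_DBT_USERMEM buffer filled with DB_NEXT | DB_MULTIPLE_KEY and walked with DB_MULTIPLE_KEY_NEXT. A minimal sketch of just that pattern under the same 1MB buffer assumption (the walk_bulk name is ours; the ENOMEM/realloc retry and recno/queue handling from the real code are omitted):

/*
 * Illustrative sketch only: walk an open btree or hash handle with the
 * bulk-get pattern used by db_dump_dump() above.  Recno and queue
 * databases would use DB_MULTIPLE_RECNO_NEXT instead.
 */
#include <stdlib.h>
#include <string.h>
#include <db.h>

int
walk_bulk(DB *dbp)
{
	DBC *dbcp;
	DBT key, data, k, d;
	void *p;
	int ret;

	if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0)
		return (1);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	data.ulen = 1024 * 1024;	/* 1MB bulk buffer, as above. */
	data.flags = DB_DBT_USERMEM;
	if ((data.data = malloc(data.ulen)) == NULL) {
		(void)dbcp->c_close(dbcp);
		return (1);
	}

	while ((ret = dbcp->c_get(dbcp,
	    &key, &data, DB_NEXT | DB_MULTIPLE_KEY)) == 0) {
		DB_MULTIPLE_INIT(p, &data);
		for (;;) {
			DB_MULTIPLE_KEY_NEXT(p, &data,
			    k.data, k.size, d.data, d.size);
			if (d.data == NULL)
				break;
			/* k and d now point into the bulk buffer. */
		}
	}

	free(data.data);
	(void)dbcp->c_close(dbcp);
	return (ret == DB_NOTFOUND ? 0 : 1);
}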
diff --git a/db/build_vxworks/db_dump/db_dump.wpj b/db/build_vxworks/db_dump/db_dump.wpj
new file mode 100644
index 000000000..a1db74413
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_dump.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_dump.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_dump.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_dump.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_dump.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> FILE_db_dump.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_dump.c
+<END>
+
+<BEGIN> userComments
+db_dump
+<END>
diff --git a/db/build_vxworks/db_dump/db_dump/Makefile.component b/db/build_vxworks/db_dump/db_dump/Makefile.component
new file mode 100644
index 000000000..7f537dcfc
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_dump
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_dump.o \
+ compConfig.o
+COMPONENT_OBJS = db_dump.o
+DEPENDENCY_FILES = db_dump.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_dump.o \
+ compConfig.o
+COMPONENT_OBJS = db_dump.o
+DEPENDENCY_FILES = db_dump.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_dump.o \
+ compConfig.o
+COMPONENT_OBJS = db_dump.o
+DEPENDENCY_FILES = db_dump.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_dump.o \
+ compConfig.o
+COMPONENT_OBJS = db_dump.o
+DEPENDENCY_FILES = db_dump.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_dump.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_dump modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+	$(NM) $@ | $(MUNCH) > db_dump_ctdt.c
+ $(COMPILE_TRADITIONAL) db_dump_ctdt.c -o db_dump_ctdt.o
+ $(LD) -r -o db_dump.tmp $@ db_dump_ctdt.o
+ $(RM) $@
+ $(MV) db_dump.tmp $@
+#
+# Adds entry point table section to db_dump component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_dump modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_dump.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_dump.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
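+# Illustrative note (the path value is a hypothetical example): the two
+# $(subst ...) calls above escape backslashes and prefix forward slashes
+# with a backslash, so that the external-binaries directory prefix can be
+# matched and stripped from the nm/size output by the sed commands below;
+# e.g. a prefix of c:/proj/extbin/PENTIUM2gnu.debug/ becomes
+# c:\/proj\/extbin\/PENTIUM2gnu.debug\/.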
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_dump.o: >> $@
+ $(NM) db_dump.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_dump.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_dump/db_dump/Makefile.custom b/db/build_vxworks/db_dump/db_dump/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in the component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_dump/db_dump/component.cdf b/db/build_vxworks/db_dump/db_dump/component.cdf
new file mode 100644
index 000000000..5c1d4ccf3
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_DUMP {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_dump.o
+ NAME db_dump
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_dump.o {
+
+ NAME db_dump.o
+ SRC_PATH_NAME $PRJ_DIR/../db_dump.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_dump/db_dump/component.wpj b/db/build_vxworks/db_dump/db_dump/component.wpj
new file mode 100644
index 000000000..b78e4992f
--- /dev/null
+++ b/db/build_vxworks/db_dump/db_dump/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_objects
+db_dump.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_dump.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_dump.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_load/db_load.c b/db/build_vxworks/db_load/db_load.c
new file mode 100644
index 000000000..1fa22c334
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load.c
@@ -0,0 +1,1132 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_load.c,v 11.47 2001/10/11 22:46:27 ubell Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "clib_ext.h"
+
+void db_load_badend __P((DB_ENV *));
+void db_load_badnum __P((DB_ENV *));
+int db_load_configure __P((DB_ENV *, DB *, char **, char **, int *));
+int db_load_convprintable __P((DB_ENV *, char *, char **));
+int db_load_db_init __P((DB_ENV *, char *));
+int db_load_dbt_rdump __P((DB_ENV *, DBT *));
+int db_load_dbt_rprint __P((DB_ENV *, DBT *));
+int db_load_dbt_rrecno __P((DB_ENV *, DBT *, int));
+int db_load_digitize __P((DB_ENV *, int, int *));
+int db_load_load __P((DB_ENV *, char *, DBTYPE, char **, int, u_int32_t, int *));
+int db_load_main __P((int, char *[]));
+int db_load_rheader __P((DB_ENV *, DB *, DBTYPE *, char **, int *, int *));
+int db_load_usage __P((void));
+int db_load_version_check __P((const char *));
+
+typedef struct { /* XXX: Globals. */
+ const char *progname; /* Program name. */
+ u_long lineno; /* Input file line number. */
+ int endodata; /* Reached the end of a database. */
+ int endofile; /* Reached the end of the input. */
+ int version; /* Input version. */
+} LDG;
+
+#define G(f) ((LDG *)dbenv->app_private)->f
+
+int
+db_load(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_load", args, &argc, &argv);
+ return (db_load_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_load_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DBTYPE dbtype;
+ DB_ENV *dbenv;
+ LDG ldg;
+ u_int32_t db_nooverwrite;
+ int ch, existed, exitval, no_header, ret;
+ char **clist, **clp, *home;
+
+ ldg.progname = "db_load";
+ ldg.lineno = 0;
+ ldg.endodata = ldg.endofile = 0;
+ ldg.version = 1;
+
+ if ((ret = db_load_version_check(ldg.progname)) != 0)
+ return (ret);
+
+ home = NULL;
+ db_nooverwrite = 0;
+ exitval = no_header = 0;
+ dbtype = DB_UNKNOWN;
+
+ /* Allocate enough room for configuration arguments. */
+ if ((clp = clist = (char **)calloc(argc + 1, sizeof(char *))) == NULL) {
+ fprintf(stderr, "%s: %s\n", ldg.progname, strerror(ENOMEM));
+ return (EXIT_FAILURE);
+ }
+
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "c:f:h:nTt:V")) != EOF)
+ switch (ch) {
+ case 'c':
+ *clp++ = optarg;
+ break;
+ case 'f':
+ if (freopen(optarg, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s: reopen: %s\n",
+ ldg.progname, optarg, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'n':
+ db_nooverwrite = DB_NOOVERWRITE;
+ break;
+ case 'T':
+ no_header = 1;
+ break;
+ case 't':
+ if (strcmp(optarg, "btree") == 0) {
+ dbtype = DB_BTREE;
+ break;
+ }
+ if (strcmp(optarg, "hash") == 0) {
+ dbtype = DB_HASH;
+ break;
+ }
+ if (strcmp(optarg, "recno") == 0) {
+ dbtype = DB_RECNO;
+ break;
+ }
+ if (strcmp(optarg, "queue") == 0) {
+ dbtype = DB_QUEUE;
+ break;
+ }
+ return (db_load_usage());
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_load_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 1)
+ return (db_load_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object initialized for error reporting, and
+ * then open it.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", ldg.progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, ldg.progname);
+ if (db_load_db_init(dbenv, home) != 0)
+ goto shutdown;
+ dbenv->app_private = &ldg;
+
+ while (!ldg.endofile)
+ if (db_load_load(dbenv, argv[0],
+ dbtype, clist, no_header, db_nooverwrite, &existed) != 0)
+ goto shutdown;
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", ldg.progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+ free(clist);
+
+ /*
+ * Return 0 on success, 1 if keys existed already, and 2 on failure.
+ *
+ * Technically, this is wrong, because exit of anything other than
+ * 0 is implementation-defined by the ANSI C standard. I don't see
+ * any good solutions that don't involve API changes.
+ */
+ return (exitval == 0 ? (existed == 0 ? 0 : 1) : 2);
+}
+
+/*
+ * load --
+ * Load a database.
+ */
+int
+db_load_load(dbenv, name, argtype, clist, no_header, db_nooverwrite, existedp)
+ DB_ENV *dbenv;
+ char *name, **clist;
+ DBTYPE argtype;
+ int no_header, *existedp;
+ u_int32_t db_nooverwrite;
+{
+ DB *dbp;
+ DBT key, rkey, data, *readp, *writep;
+ DBTYPE dbtype;
+ DB_TXN *ctxn, *txn;
+ db_recno_t recno, datarecno;
+ int checkprint, hexkeys, keys, ret, rval;
+ int keyflag, ascii_recno;
+ char *subdb;
+
+ *existedp = 0;
+ G(endodata) = 0;
+
+ subdb = NULL;
+ ctxn = txn = NULL;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ memset(&rkey, 0, sizeof(DBT));
+
+ /* Create the DB object. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ return (1);
+ }
+
+ dbtype = DB_UNKNOWN;
+ keys = -1;
+ hexkeys = -1;
+ keyflag = -1;
+ /* Read the header -- if there's no header, we expect flat text. */
+ if (no_header) {
+ checkprint = 1;
+ dbtype = argtype;
+ } else {
+ if (db_load_rheader(dbenv,
+ dbp, &dbtype, &subdb, &checkprint, &keys) != 0)
+ goto err;
+ if (G(endofile))
+ goto done;
+ }
+
+ /*
+ * Apply command-line configuration changes. (We apply command-line
+ * configuration changes to all databases that are loaded, e.g., all
+ * subdatabases.)
+ */
+ if (db_load_configure(dbenv, dbp, clist, &subdb, &keyflag))
+ goto err;
+
+ if (keys != 1) {
+ if (keyflag == 1) {
+ dbp->err(dbp, EINVAL, "No keys specified in file");
+ goto err;
+ }
+ }
+ else if (keyflag == 0) {
+ dbp->err(dbp, EINVAL, "Keys specified in file");
+ goto err;
+ }
+ else
+ keyflag = 1;
+
+ if (dbtype == DB_BTREE || dbtype == DB_HASH) {
+ if (keyflag == 0)
+ dbp->err(dbp,
+ EINVAL, "Btree and Hash must specify keys");
+ else
+ keyflag = 1;
+ }
+
+ if (argtype != DB_UNKNOWN) {
+
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE)
+ if (keyflag != 1 && argtype != DB_RECNO
+ && argtype != DB_QUEUE) {
+ dbenv->errx(dbenv,
+ "improper database type conversion specified");
+ goto err;
+ }
+ dbtype = argtype;
+ }
+
+ if (dbtype == DB_UNKNOWN) {
+ dbenv->errx(dbenv, "no database type specified");
+ goto err;
+ }
+
+ if (keyflag == -1)
+ keyflag = 0;
+
+ /*
+ * Recno keys have only been printed in hexadecimal starting
+ * with db_dump format version 3 (DB 3.2).
+ *
+ * !!!
+ * Note that version is set in db_load_rheader(), which must be called before
+ * this assignment.
+ */
+ hexkeys = (G(version) >= 3 && keyflag == 1 && checkprint == 0);
+
+ if (keyflag == 1 && (dbtype == DB_RECNO || dbtype == DB_QUEUE))
+ ascii_recno = 1;
+ else
+ ascii_recno = 0;
+
+ /* Open the DB file. */
+ if ((ret = dbp->open(dbp,
+ name, subdb, dbtype, DB_CREATE, __db_omode("rwrwrw"))) != 0) {
+ dbp->err(dbp, ret, "DB->open: %s", name);
+ goto err;
+ }
+
+ /* Initialize the key/data pair. */
+ readp = &key;
+ writep = &key;
+ if (dbtype == DB_RECNO || dbtype == DB_QUEUE) {
+ key.size = sizeof(recno);
+ if (keyflag) {
+ key.data = &datarecno;
+ if (checkprint) {
+ readp = &rkey;
+ goto key_data;
+ }
+ }
+ else
+ key.data = &recno;
+ } else
+key_data: if ((readp->data =
+ (void *)malloc(readp->ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+ if ((data.data = (void *)malloc(data.ulen = 1024)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ goto err;
+ }
+
+ if (TXN_ON(dbenv) &&
+ (ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+
+ /* Get each key/data pair and add them to the database. */
+ for (recno = 1; !__db_util_interrupted(); ++recno) {
+ if (!keyflag)
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, &data))
+ goto err;
+ } else {
+ if (db_load_dbt_rdump(dbenv, &data))
+ goto err;
+ }
+ else
+ if (checkprint) {
+ if (db_load_dbt_rprint(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rprint(dbenv, &data))
+ goto fmt;
+ } else {
+ if (ascii_recno) {
+ if (db_load_dbt_rrecno(dbenv, readp, hexkeys))
+ goto err;
+ } else
+ if (db_load_dbt_rdump(dbenv, readp))
+ goto err;
+ if (!G(endodata) && db_load_dbt_rdump(dbenv, &data)) {
+fmt: dbenv->errx(dbenv,
+ "odd number of key/data pairs");
+ goto err;
+ }
+ }
+ if (G(endodata))
+ break;
+ if (readp != writep) {
+ if (sscanf(readp->data, "%ud", &datarecno) != 1)
+ dbenv->errx(dbenv,
+ "%s: non-integer key at line: %d",
+ name, !keyflag ? recno : recno * 2 - 1);
+ if (datarecno == 0)
+ dbenv->errx(dbenv, "%s: zero key at line: %d",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+ }
+retry: if (txn != NULL)
+ if ((ret = dbenv->txn_begin(dbenv, txn, &ctxn, 0)) != 0)
+ goto err;
+ switch (ret =
+ dbp->put(dbp, ctxn, writep, &data, db_nooverwrite)) {
+ case 0:
+ if (ctxn != NULL) {
+ if ((ret =
+ ctxn->commit(ctxn, DB_TXN_NOSYNC)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ break;
+ case DB_KEYEXIST:
+ *existedp = 1;
+ dbenv->errx(dbenv,
+ "%s: line %d: key already exists, not loaded:",
+ name,
+ !keyflag ? recno : recno * 2 - 1);
+
+ (void)__db_prdbt(&key, checkprint, 0, stderr,
+ __db_verify_callback, 0, NULL);
+ break;
+ case DB_LOCK_DEADLOCK:
+ /* If we have a child txn, retry--else it's fatal. */
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ goto retry;
+ }
+ /* FALLTHROUGH */
+ default:
+ dbenv->err(dbenv, ret, NULL);
+ if (ctxn != NULL) {
+ (void)ctxn->abort(ctxn);
+ ctxn = NULL;
+ }
+ goto err;
+ }
+ if (ctxn != NULL) {
+ if ((ret = ctxn->abort(ctxn)) != 0)
+ goto err;
+ ctxn = NULL;
+ }
+ }
+done: rval = 0;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL && (ret = txn->commit(txn, 0)) != 0) {
+ txn = NULL;
+ goto err;
+ }
+
+ if (0) {
+err: rval = 1;
+ DB_ASSERT(ctxn == NULL);
+ if (txn != NULL)
+ (void)txn->abort(txn);
+ }
+
+ /* Close the database. */
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB->close");
+ rval = 1;
+ }
+
+ /* Free allocated memory. */
+ if (subdb != NULL)
+ free(subdb);
+ if (dbtype != DB_RECNO && dbtype != DB_QUEUE)
+ free(key.data);
+ if (rkey.data != NULL)
+ free(rkey.data);
+ free(data.data);
+
+ return (rval);
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_load_db_init(dbenv, home)
+ DB_ENV *dbenv;
+ char *home;
+{
+ u_int32_t flags;
+ int ret;
+
+ /* We may be loading into a live environment. Try and join. */
+ flags = DB_USE_ENVIRON |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+ if (dbenv->open(dbenv, home, flags, 0) == 0)
+ return (0);
+
+ /*
+ * We're trying to load a database.
+ *
+ * An environment is required because we may be trying to look at
+ * databases in directories other than the current one. We could
+ * avoid using an environment iff the -h option wasn't specified,
+ * but that seems like more work than it's worth.
+ *
+ * No environment exists (or, at least no environment that includes
+ * an mpool region exists). Create one, but make it private so that
+ * no files are actually created.
+ */
+ LF_CLR(DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN);
+ LF_SET(DB_CREATE | DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ return (1);
+}
+
+#define FLAG(name, value, keyword, flag) \
+ if (strcmp(name, keyword) == 0) { \
+ switch (*value) { \
+ case '1': \
+ if ((ret = dbp->set_flags(dbp, flag)) != 0) { \
+ dbp->err(dbp, ret, "%s: set_flags: %s", \
+ G(progname), name); \
+ return (1); \
+ } \
+ break; \
+ case '0': \
+ break; \
+ default: \
+ db_load_badnum(dbenv); \
+ return (1); \
+ } \
+ continue; \
+ }
+#define NUMBER(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if (__db_getlong(dbp, \
+ NULL, value, 1, LONG_MAX, &val) != 0) \
+ return (1); \
+ if ((ret = dbp->func(dbp, val)) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+#define STRING(name, value, keyword, func) \
+ if (strcmp(name, keyword) == 0) { \
+ if ((ret = dbp->func(dbp, value[0])) != 0) \
+ goto nameerr; \
+ continue; \
+ }
+
+/*
+ * configure --
+ * Handle command-line configuration options.
+ */
+int
+db_load_configure(dbenv, dbp, clp, subdbp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ char **clp, **subdbp;
+ int *keysp;
+{
+ long val;
+ int ret, savech;
+ char *name, *value;
+
+ for (; (name = *clp) != NULL; *--value = savech, ++clp) {
+ if ((value = strchr(name, '=')) == NULL) {
+ dbp->errx(dbp,
+ "command-line configuration uses name=value format");
+ return (1);
+ }
+ savech = *value;
+ *value++ = '\0';
+
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((*subdbp = strdup(value)) == NULL) {
+ dbp->err(dbp, ENOMEM, NULL);
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown command-line configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+}
+
+/*
+ * rheader --
+ * Read the header message.
+ */
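+/*
+ * A minimal sketch of the header this parser accepts, inferred from the
+ * keyword handling below; the values shown are illustrative only:
+ *
+ *	VERSION=3
+ *	format=bytevalue
+ *	type=btree
+ *	db_pagesize=4096
+ *	HEADER=END
+ *
+ * Each header line is a name=value pair, and HEADER=END terminates the
+ * header; the key/data lines of the dump follow it.
+ */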
+int
+db_load_rheader(dbenv, dbp, dbtypep, subdbp, checkprintp, keysp)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DBTYPE *dbtypep;
+ char **subdbp;
+ int *checkprintp, *keysp;
+{
+ long val;
+ int first, linelen, buflen, ret;
+ char *buf, ch, *name, *p, *value;
+
+ *dbtypep = DB_UNKNOWN;
+ *checkprintp = 0;
+
+ /*
+ * We start with a smallish buffer; most headers are small.
+ * We may need to realloc it for a large subdatabase name.
+ */
+ buflen = 4096;
+ if ((buf = (char *)malloc(buflen)) == NULL) {
+memerr: dbp->errx(dbp, "could not allocate buffer %d", buflen);
+ return (1);
+ }
+
+ for (first = 1;; first = 0) {
+ ++G(lineno);
+
+ /* Read a line, which may be of arbitrary length, into buf. */
+ linelen = 0;
+ for (;;) {
+ if ((ch = getchar()) == EOF) {
+ if (!first || ferror(stdin))
+ goto badfmt;
+ G(endofile) = 1;
+ break;
+ }
+
+ if (ch == '\n')
+ break;
+
+ buf[linelen++] = ch;
+
+ /* If the buffer is too small, double it. */
+ if (linelen == buflen) {
+ buf = (char *)realloc(buf, buflen *= 2);
+ if (buf == NULL)
+ goto memerr;
+ }
+ }
+ if (G(endofile) == 1)
+ break;
+ buf[linelen] = '\0';
+
+
+ /* If we don't see the expected information, it's an error. */
+ if ((p = strchr(name = buf, '=')) == NULL)
+ goto badfmt;
+ *p++ = '\0';
+
+ value = p;
+
+ if (name[0] == '\0' || value[0] == '\0')
+ goto badfmt;
+
+ if (strcmp(name, "HEADER") == 0)
+ break;
+ if (strcmp(name, "VERSION") == 0) {
+ /*
+ * Version 1 didn't have a "VERSION" header line. We
+ * only support versions 1, 2, and 3 of the dump format.
+ */
+ G(version) = atoi(value);
+
+ if (G(version) > 3) {
+ dbp->errx(dbp,
+ "line %lu: VERSION %d is unsupported",
+ G(lineno), G(version));
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "format") == 0) {
+ if (strcmp(value, "bytevalue") == 0) {
+ *checkprintp = 0;
+ continue;
+ }
+ if (strcmp(value, "print") == 0) {
+ *checkprintp = 1;
+ continue;
+ }
+ goto badfmt;
+ }
+ if (strcmp(name, "type") == 0) {
+ if (strcmp(value, "btree") == 0) {
+ *dbtypep = DB_BTREE;
+ continue;
+ }
+ if (strcmp(value, "hash") == 0) {
+ *dbtypep = DB_HASH;
+ continue;
+ }
+ if (strcmp(value, "recno") == 0) {
+ *dbtypep = DB_RECNO;
+ continue;
+ }
+ if (strcmp(value, "queue") == 0) {
+ *dbtypep = DB_QUEUE;
+ continue;
+ }
+ dbp->errx(dbp, "line %lu: unknown type", G(lineno));
+ return (1);
+ }
+ if (strcmp(name, "database") == 0 ||
+ strcmp(name, "subdatabase") == 0) {
+ if ((ret = db_load_convprintable(dbenv, value, subdbp)) != 0) {
+ dbp->err(dbp, ret, "error reading db name");
+ return (1);
+ }
+ continue;
+ }
+ if (strcmp(name, "keys") == 0) {
+ if (strcmp(value, "1") == 0)
+ *keysp = 1;
+ else if (strcmp(value, "0") == 0)
+ *keysp = 0;
+ else {
+ db_load_badnum(dbenv);
+ return (1);
+ }
+ continue;
+ }
+
+#ifdef notyet
+ NUMBER(name, value, "bt_maxkey", set_bt_maxkey);
+#endif
+ NUMBER(name, value, "bt_minkey", set_bt_minkey);
+ NUMBER(name, value, "db_lorder", set_lorder);
+ NUMBER(name, value, "db_pagesize", set_pagesize);
+ NUMBER(name, value, "extentsize", set_q_extentsize);
+ FLAG(name, value, "duplicates", DB_DUP);
+ FLAG(name, value, "dupsort", DB_DUPSORT);
+ NUMBER(name, value, "h_ffactor", set_h_ffactor);
+ NUMBER(name, value, "h_nelem", set_h_nelem);
+ NUMBER(name, value, "re_len", set_re_len);
+ STRING(name, value, "re_pad", set_re_pad);
+ FLAG(name, value, "recnum", DB_RECNUM);
+ FLAG(name, value, "renumber", DB_RENUMBER);
+
+ dbp->errx(dbp,
+ "unknown input-file header configuration keyword \"%s\"", name);
+ return (1);
+ }
+ return (0);
+
+nameerr:
+ dbp->err(dbp, ret, "%s: %s=%s", G(progname), name, value);
+ return (1);
+
+badfmt:
+ dbp->errx(dbp, "line %lu: unexpected format", G(lineno));
+ return (1);
+}
+
+/*
+ * convprintable --
+ * Convert a printable-encoded string into a newly allocated string.
+ *
+ * In an ideal world, this would probably share code with dbt_rprint, but
+ * that's set up to read character-by-character (to avoid large memory
+ * allocations that aren't likely to be a problem here), and this has fewer
+ * special cases to deal with.
+ *
+ * Note that despite the printable encoding, the char * interface to this
+ * function (which is, not coincidentally, also used for database naming)
+ * means that outstr cannot contain any nuls.
+ */
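+/*
+ * Illustrative example of the decoding below (the input string is a made-up
+ * assumption, not taken from an actual dump): "\\" yields a single
+ * backslash, and a backslash followed by two hex digits yields that byte,
+ * so "sub\2fdb" decodes to "sub/db" (0x2f is '/').
+ */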
+int
+db_load_convprintable(dbenv, instr, outstrp)
+ DB_ENV *dbenv;
+ char *instr, **outstrp;
+{
+ char c, *outstr;
+ int e1, e2;
+
+ /*
+ * Just malloc a string big enough for the whole input string;
+ * the output string will be smaller (or of equal length).
+ */
+	outstr = (char *)malloc(strlen(instr) + 1);	/* Leave room for the nul. */
+ if (outstr == NULL)
+ return (ENOMEM);
+
+ *outstrp = outstr;
+
+ e1 = e2 = 0;
+ for ( ; *instr != '\0'; instr++)
+ if (*instr == '\\') {
+ if (*++instr == '\\') {
+ *outstr++ = '\\';
+ continue;
+ }
+ c = db_load_digitize(dbenv, *instr, &e1) << 4 |
+ db_load_digitize(dbenv, *++instr, &e2);
+ if (e1 || e2) {
+ db_load_badend(dbenv);
+ return (EINVAL);
+ }
+
+ *outstr++ = c;
+ } else
+ *outstr++ = *instr;
+
+ *outstr = '\0';
+
+ return (0);
+}
+
+
+/*
+ * dbt_rprint --
+ * Read a printable line into a DBT structure.
+ */
+int
+db_load_dbt_rprint(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, escape, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = escape = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if (escape) {
+ if (c1 != '\\') {
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ c1 = db_load_digitize(dbenv,
+ c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ escape = 0;
+ } else
+ if (c1 == '\\') {
+ escape = 1;
+ continue;
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = c1;
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rdump --
+ * Read a byte dump line into a DBT structure.
+ */
+int
+db_load_dbt_rdump(dbenv, dbtp)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+{
+ u_int32_t len;
+ u_int8_t *p;
+ int c1, c2, e, first;
+ char buf[32];
+
+ ++G(lineno);
+
+ first = 1;
+ e = 0;
+ for (p = dbtp->data, len = 0; (c1 = getchar()) != '\n';) {
+ if (c1 == EOF) {
+ if (len == 0) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (first) {
+ first = 0;
+ if (G(version) > 1) {
+ if (c1 != ' ') {
+ buf[0] = c1;
+ if (fgets(buf + 1,
+ sizeof(buf) - 1, stdin) == NULL ||
+ strcmp(buf, "DATA=END\n") != 0) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ G(endodata) = 1;
+ return (0);
+ }
+ continue;
+ }
+ }
+ if ((c2 = getchar()) == EOF) {
+ db_load_badend(dbenv);
+ return (1);
+ }
+ if (len >= dbtp->ulen - 10) {
+ dbtp->ulen *= 2;
+ if ((dbtp->data =
+ (void *)realloc(dbtp->data, dbtp->ulen)) == NULL) {
+ dbenv->err(dbenv, ENOMEM, NULL);
+ return (1);
+ }
+ p = (u_int8_t *)dbtp->data + len;
+ }
+ ++len;
+ *p++ = db_load_digitize(dbenv, c1, &e) << 4 | db_load_digitize(dbenv, c2, &e);
+ if (e)
+ return (1);
+ }
+ dbtp->size = len;
+
+ return (0);
+}
+
+/*
+ * dbt_rrecno --
+ * Read a record number dump line into a DBT structure.
+ */
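+/*
+ * A worked example of the input handled here (the record number is chosen
+ * for illustration): each line is a leading space followed by the record
+ * number, and in a version 3 bytevalue dump every decimal digit appears as
+ * a "3X" hex pair, so record 42 arrives as " 3432" and is converted in
+ * place to "42" before __db_getulong() parses it.
+ */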
+int
+db_load_dbt_rrecno(dbenv, dbtp, ishex)
+ DB_ENV *dbenv;
+ DBT *dbtp;
+ int ishex;
+{
+ char buf[32], *p, *q;
+
+ ++G(lineno);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL) {
+ G(endofile) = G(endodata) = 1;
+ return (0);
+ }
+
+ if (strcmp(buf, "DATA=END\n") == 0) {
+ G(endodata) = 1;
+ return (0);
+ }
+
+ if (buf[0] != ' ')
+ goto bad;
+
+ /*
+ * If we're expecting a hex key, do an in-place conversion
+ * of hex to straight ASCII before calling __db_getulong().
+ */
+ if (ishex) {
+ for (p = q = buf + 1; *q != '\0' && *q != '\n';) {
+ /*
+ * 0-9 in hex are 0x30-0x39, so this is easy.
+ * We should alternate between 3's and [0-9], and
+ * if the [0-9] are something unexpected,
+ * __db_getulong will fail, so we only need to catch
+ * end-of-string conditions.
+ */
+ if (*q++ != '3')
+ goto bad;
+ if (*q == '\n' || *q == '\0')
+ goto bad;
+ *p++ = *q++;
+ }
+ *p = '\0';
+ }
+
+ if (__db_getulong(NULL,
+ G(progname), buf + 1, 0, 0, (u_long *)dbtp->data)) {
+bad: db_load_badend(dbenv);
+ return (1);
+ }
+
+ dbtp->size = sizeof(db_recno_t);
+ return (0);
+}
+
+/*
+ * digitize --
+ * Convert a character to an integer.
+ */
+int
+db_load_digitize(dbenv, c, errorp)
+ DB_ENV *dbenv;
+ int c, *errorp;
+{
+ switch (c) { /* Don't depend on ASCII ordering. */
+ case '0': return (0);
+ case '1': return (1);
+ case '2': return (2);
+ case '3': return (3);
+ case '4': return (4);
+ case '5': return (5);
+ case '6': return (6);
+ case '7': return (7);
+ case '8': return (8);
+ case '9': return (9);
+ case 'a': return (10);
+ case 'b': return (11);
+ case 'c': return (12);
+ case 'd': return (13);
+ case 'e': return (14);
+ case 'f': return (15);
+ }
+
+ dbenv->errx(dbenv, "unexpected hexadecimal value");
+ *errorp = 1;
+
+ return (0);
+}
+
+/*
+ * badnum --
+ * Display the bad number message.
+ */
+void
+db_load_badnum(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv,
+ "boolean name=value pairs require a value of 0 or 1");
+}
+
+/*
+ * badend --
+ * Display the bad end to input message.
+ */
+void
+db_load_badend(dbenv)
+ DB_ENV *dbenv;
+{
+ dbenv->errx(dbenv, "unexpected end of input data or key/data pair");
+}
+
+/*
+ * usage --
+ * Display the usage message.
+ */
+int
+db_load_usage()
+{
+ (void)fprintf(stderr, "%s\n\t%s\n",
+ "usage: db_load [-nTV]",
+	    "[-c name=value] [-f file] [-h home] [-t btree | hash | recno | queue] db_file");
+ return (EXIT_FAILURE);
+}
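+
+/*
+ * Example invocation (the file and directory names are hypothetical):
+ *
+ *	db_load -h /tmp/dbhome -f mydb.dump -t btree mydb.db
+ *
+ * reads a db_dump-format file from mydb.dump and loads it into the btree
+ * database mydb.db within the environment at /tmp/dbhome.
+ */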
+
+int
+db_load_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/db/build_vxworks/db_load/db_load.wpj b/db/build_vxworks/db_load/db_load.wpj
new file mode 100644
index 000000000..51f2fbff1
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_load.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_load.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_load.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_load.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_load.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> FILE_db_load.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_load.c
+<END>
+
+<BEGIN> userComments
+db_load
+<END>
diff --git a/db/build_vxworks/db_load/db_load/Makefile.component b/db/build_vxworks/db_load/db_load/Makefile.component
new file mode 100644
index 000000000..34a0dec6c
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_load
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_load.o \
+ compConfig.o
+COMPONENT_OBJS = db_load.o
+DEPENDENCY_FILES = db_load.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_load.o \
+ compConfig.o
+COMPONENT_OBJS = db_load.o
+DEPENDENCY_FILES = db_load.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_load.o \
+ compConfig.o
+COMPONENT_OBJS = db_load.o
+DEPENDENCY_FILES = db_load.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_load.o \
+ compConfig.o
+COMPONENT_OBJS = db_load.o
+DEPENDENCY_FILES = db_load.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_load.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_load modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_load.c
+ $(COMPILE_TRADITIONAL) db_load_ctdt.c -o db_load_ctdt.o
+ $(LD) -r -o db_load.tmp $@ db_load_ctdt.o
+ $(RM) $@
+ $(MV) db_load.tmp $@
+#
+# Adds entry point table section to db_load component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
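The db_load.cm rule above partially links every project object, pipes the $(NM) symbol listing through $(MUNCH) to produce a C table of static constructors/destructors (the "_vxMain and _vxExit" munch step named in the comments), compiles that file with $(COMPILE_TRADITIONAL), and relinks it into the component before the entry-point table is appended. As a rough illustration only, a munch-generated ctdt file conventionally resembles the sketch below; the exact contents depend on the Tornado toolchain's munch script, and the names here (exampleCtor, the _ctors/_dtors arrays) are assumptions, not taken from this project.

typedef void (*VOIDFUNCPTR)(void);

/* Stand-in for a static-constructor symbol that munch would have
 * discovered in the $(NM) output; purely illustrative. */
static void exampleCtor(void)
{
}

/* NULL-terminated constructor/destructor tables walked by the loader. */
VOIDFUNCPTR _ctors[] = { exampleCtor, 0 };
VOIDFUNCPTR _dtors[] = { 0 };
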
+
+#
+# Partial link build rules
+# Partially link all db_load modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_load.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_load.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_load.o: >> $@
+ $(NM) db_load.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_load.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_load/db_load/Makefile.custom b/db/build_vxworks/db_load/db_load/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_load/db_load/component.cdf b/db/build_vxworks/db_load/db_load/component.cdf
new file mode 100644
index 000000000..7d1d2bc95
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_LOAD {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_load.o
+ NAME db_load
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_load.o {
+
+ NAME db_load.o
+ SRC_PATH_NAME $PRJ_DIR/../db_load.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_load/db_load/component.wpj b/db/build_vxworks/db_load/db_load/component.wpj
new file mode 100644
index 000000000..31be021e7
--- /dev/null
+++ b/db/build_vxworks/db_load/db_load/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_load.c_objects
+db_load.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_load_DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_load.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_printlog/db_printlog.c b/db/build_vxworks/db_printlog/db_printlog.c
new file mode 100644
index 000000000..6a8787fd9
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog.c
@@ -0,0 +1,238 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_printlog.c,v 11.36 2001/10/11 22:46:27 ubell Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "btree.h"
+#include "db_am.h"
+#include "hash.h"
+#include "log.h"
+#include "qam.h"
+#include "txn.h"
+#include "clib_ext.h"
+
+int db_printlog_main __P((int, char *[]));
+int db_printlog_usage __P((void));
+int db_printlog_version_check __P((const char *));
+
+int
+db_printlog(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_printlog", args, &argc, &argv);
+ return (db_printlog_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
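
The db_printlog() wrapper above exists because this VxWorks build renames main() to db_printlog_main() and exposes a single-string entry point instead: __db_util_arg() splits the argument string into an argc/argv pair that the renamed main can parse with getopt as usual. A hypothetical call from target code might look like the sketch below; the exact tokenizing behavior of __db_util_arg() (for example, whether it prepends the program name itself) is assumed here, and the environment path is a placeholder.

extern int db_printlog(char *args);

void
db_printlog_demo(void)
{
	/* Dump the log of the environment under /tmp/dbenv, newest record
	 * first (-r), with locking disabled (-N).  The buffer is writable
	 * because the option string may be tokenized in place. */
	char args[] = "-h /tmp/dbenv -r -N";

	(void)db_printlog(args);
}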
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_printlog_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_printlog";
+ DB_ENV *dbenv;
+ DB_LOGC *logc;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+ DBT data;
+ DB_LSN key;
+ int ch, e_close, exitval, nflag, rflag, ret;
+ char *home;
+
+ if ((ret = db_printlog_version_check(progname)) != 0)
+ return (ret);
+
+ logc = NULL;
+ e_close = exitval = nflag = rflag = 0;
+ home = NULL;
+ dtabsize = 0;
+ dtab = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NrV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'r':
+ rflag = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_printlog_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc > 0)
+ return (db_printlog_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ /*
+ * An environment is required, but as all we're doing is reading log
+ * files, we create one if it doesn't already exist. If we create
+ * it, create it private so it automatically goes away when we're done.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ /* Initialize print callbacks. */
+ if ((ret = __bam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __crdel_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __db_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __qam_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __ham_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __log_init_print(dbenv, &dtab, &dtabsize)) != 0 ||
+ (ret = __txn_init_print(dbenv, &dtab, &dtabsize)) != 0) {
+ dbenv->err(dbenv, ret, "callback: initialization");
+ goto shutdown;
+ }
+
+ /* Allocate a log cursor. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
+ goto shutdown;
+ }
+
+ memset(&data, 0, sizeof(data));
+ while (!__db_util_interrupted()) {
+ if ((ret = logc->get(
+ logc, &key, &data, rflag ? DB_PREV : DB_NEXT)) != 0) {
+ if (ret == DB_NOTFOUND)
+ break;
+ dbenv->err(dbenv, ret, "DB_LOGC->get");
+ goto shutdown;
+ }
+
+ /*
+ * XXX
+ * We use DB_TXN_ABORT as our op because that's the only op
+ * that calls the underlying recovery function without any
+ * consideration as to the contents of the transaction list.
+ */
+ ret =
+ __db_dispatch(dbenv, dtab, &data, &key, DB_TXN_ABORT, NULL);
+
+ /*
+ * XXX
+ * Just in case the underlying routines don't flush.
+ */
+ (void)fflush(stdout);
+
+ if (ret != 0) {
+ dbenv->err(dbenv, ret, "tx: dispatch");
+ goto shutdown;
+ }
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (logc != NULL && (ret = logc->close(logc, 0)) != 0)
+ exitval = 1;
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab, 0);
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_printlog_usage()
+{
+ fprintf(stderr, "usage: db_printlog [-NrV] [-h home]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_printlog_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
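
The heart of db_printlog_main() above is the log-cursor loop: create a DB_ENV, join (or privately create) the environment, allocate a DB_LOGC with DB_ENV->log_cursor(), and fetch records with DB_NEXT (or DB_PREV for -r) until DB_LOGC->get() returns DB_NOTFOUND. The minimal standalone sketch below shows the same cursor pattern with the print dispatch, flag handling, and signal handling stripped out; the helper name db_printlog_lsn_walk, the "db.h" include path, and the home path are illustrative, and error reporting is reduced to returning the Berkeley DB error code.

#include <stdio.h>
#include <string.h>

#include "db.h"

int
db_printlog_lsn_walk(home)
	const char *home;
{
	DB_ENV *dbenv;
	DB_LOGC *logc;
	DB_LSN lsn;
	DBT data;
	int ret, t_ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_LOG | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)
		goto err;
	if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
		goto err;

	memset(&data, 0, sizeof(data));
	/* DB_NEXT walks oldest-to-newest; DB_PREV (the -r flag) reverses it. */
	while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0)
		printf("[%lu][%lu]: %lu bytes\n", (unsigned long)lsn.file,
		    (unsigned long)lsn.offset, (unsigned long)data.size);
	if (ret == DB_NOTFOUND)
		ret = 0;
	if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
		ret = t_ret;
err:	if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}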
diff --git a/db/build_vxworks/db_printlog/db_printlog.wpj b/db/build_vxworks/db_printlog/db_printlog.wpj
new file mode 100644
index 000000000..761ac83b3
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_printlog.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_printlog.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_printlog.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_printlog.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> FILE_db_printlog.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_printlog.c
+<END>
+
+<BEGIN> userComments
+db_printlog
+<END>
diff --git a/db/build_vxworks/db_printlog/db_printlog/Makefile.component b/db/build_vxworks/db_printlog/db_printlog/Makefile.component
new file mode 100644
index 000000000..1a954a898
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_printlog
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_printlog.o \
+ compConfig.o
+COMPONENT_OBJS = db_printlog.o
+DEPENDENCY_FILES = db_printlog.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_printlog.o \
+ compConfig.o
+COMPONENT_OBJS = db_printlog.o
+DEPENDENCY_FILES = db_printlog.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_printlog.o \
+ compConfig.o
+COMPONENT_OBJS = db_printlog.o
+DEPENDENCY_FILES = db_printlog.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_printlog.o \
+ compConfig.o
+COMPONENT_OBJS = db_printlog.o
+DEPENDENCY_FILES = db_printlog.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_printlog.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_printlog modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_printlog.c
+ $(COMPILE_TRADITIONAL) db_printlog_ctdt.c -o db_printlog_ctdt.o
+ $(LD) -r -o db_printlog.tmp $@ db_printlog_ctdt.o
+ $(RM) $@
+ $(MV) db_printlog.tmp $@
+#
+# Adds entry point table section to db_printlog component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_printlog modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_printlog.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_printlog.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_printlog.o: >> $@
+ $(NM) db_printlog.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_printlog.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_printlog/db_printlog/Makefile.custom b/db/build_vxworks/db_printlog/db_printlog/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_printlog/db_printlog/component.cdf b/db/build_vxworks/db_printlog/db_printlog/component.cdf
new file mode 100644
index 000000000..57c645259
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_PRINTLOG {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_printlog.o
+ NAME db_printlog
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_printlog.o {
+
+ NAME db_printlog.o
+ SRC_PATH_NAME $PRJ_DIR/../db_printlog.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_printlog/db_printlog/component.wpj b/db/build_vxworks/db_printlog/db_printlog/component.wpj
new file mode 100644
index 000000000..2117b5199
--- /dev/null
+++ b/db/build_vxworks/db_printlog/db_printlog/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_objects
+db_printlog.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_printlog.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_printlog.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_recover/db_recover.c b/db/build_vxworks/db_recover/db_recover.c
new file mode 100644
index 000000000..93f8c4a14
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover.c
@@ -0,0 +1,315 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_recover.c,v 11.26 2001/09/07 13:31:18 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "txn.h"
+#include "common_ext.h"
+#include "clib_ext.h"
+
+int db_recover_main __P((int, char *[]));
+int db_recover_read_timestamp __P((const char *, char *, time_t *));
+int db_recover_usage __P((void));
+int db_recover_version_check __P((const char *));
+
+int
+db_recover(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_recover", args, &argc, &argv);
+ return (db_recover_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
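+/*
+ * Editor's note -- illustrative sketch only, not part of the original
+ * Sleepycat source.  The single-string wrapper above is the form a
+ * VxWorks shell or startup task would call; __db_util_arg() splits the
+ * string into argc/argv before db_recover_main() runs.  The environment
+ * home directory used here is hypothetical.
+ */
+#ifdef DB_RECOVER_EXAMPLE
+int
+db_recover_example()
+{
+	/* Roughly "db_recover -v -h /vxdb/home" on a hosted system. */
+	return (db_recover("-v -h /vxdb/home"));
+}
+#endif
+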
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_recover_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_recover";
+ DB_ENV *dbenv;
+ DB_TXNREGION *region;
+ time_t now, timestamp;
+ u_int32_t flags;
+ int ch, exitval, fatal_recover, ret, retain_env, verbose;
+ char *home;
+
+ if ((ret = db_recover_version_check(progname)) != 0)
+ return (ret);
+
+ home = NULL;
+ timestamp = 0;
+ exitval = fatal_recover = retain_env = verbose = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "ceh:t:Vv")) != EOF)
+ switch (ch) {
+ case 'c':
+ fatal_recover = 1;
+ break;
+ case 'e':
+ retain_env = 1;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 't':
+ if ((ret =
+ db_recover_read_timestamp(progname, optarg, &timestamp)) != 0)
+ return (ret);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'v':
+ verbose = 1;
+ break;
+ case '?':
+ default:
+ return (db_recover_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc != 0)
+ return (db_recover_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ return (EXIT_FAILURE);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ if (verbose) {
+ (void)dbenv->set_verbose(dbenv, DB_VERB_RECOVERY, 1);
+ (void)dbenv->set_verbose(dbenv, DB_VERB_CHKPOINT, 1);
+ }
+ if (timestamp &&
+ (ret = dbenv->set_tx_timestamp(dbenv, &timestamp)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->set_timestamp");
+ goto shutdown;
+ }
+
+ /*
+ * Initialize the environment -- we don't actually do anything
+	 * else, that's all that's needed to run recovery.
+ *
+ * Note that unless the caller specified the -e option, we use a
+ * private environment, as we're about to create a region, and we
+	 * don't want to leave it around.  If we leave the region around,
+ * the application that should create it will simply join it instead,
+ * and will then be running with incorrectly sized (and probably
+ * terribly small) caches. Applications that use -e should almost
+ * certainly use DB_CONFIG files in the directory.
+ */
+ flags = 0;
+ LF_SET(DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL | DB_INIT_TXN | DB_USE_ENVIRON);
+ LF_SET(fatal_recover ? DB_RECOVER_FATAL : DB_RECOVER);
+ LF_SET(retain_env ? 0 : DB_PRIVATE);
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open");
+ goto shutdown;
+ }
+
+ if (verbose) {
+ (void)time(&now);
+ region = ((DB_TXNMGR *)dbenv->tx_handle)->reginfo.primary;
+ dbenv->errx(dbenv, "Recovery complete at %.24s", ctime(&now));
+ dbenv->errx(dbenv, "%s %lx %s [%lu][%lu]",
+ "Maximum transaction id", (u_long)region->last_txnid,
+ "Recovery checkpoint", (u_long)region->last_ckp.file,
+ (u_long)region->last_ckp.offset);
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+
+ /* Clean up the environment. */
+ if ((ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
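+/*
+ * Editor's sketch -- not part of the original source.  As the comment in
+ * db_recover_main() explains, applications normally run recovery for
+ * themselves at startup by passing DB_RECOVER (or DB_RECOVER_FATAL) to
+ * DB_ENV->open() instead of relying on this utility.  The home directory
+ * below is hypothetical and error reporting is left to the caller.
+ */
+#ifdef DB_RECOVER_EXAMPLE
+static int
+db_recover_open_example(dbenv)
+	DB_ENV *dbenv;
+{
+	u_int32_t oflags;
+
+	oflags = DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+	    DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER;
+	return (dbenv->open(dbenv, "/vxdb/home", oflags, 0));
+}
+#endif
+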
+#define ATOI2(ar) ((ar)[0] - '0') * 10 + ((ar)[1] - '0'); (ar) += 2;
+
+/*
+ * read_timestamp --
+ * Convert a time argument to Epoch seconds.
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+int
+db_recover_read_timestamp(progname, arg, timep)
+ const char *progname;
+ char *arg;
+ time_t *timep;
+{
+ struct tm *t;
+ time_t now;
+ int yearset;
+ char *p;
+ /* Start with the current time. */
+ (void)time(&now);
+ if ((t = localtime(&now)) == NULL) {
+ fprintf(stderr,
+ "%s: localtime: %s\n", progname, strerror(errno));
+ return (EXIT_FAILURE);
+ }
+ /* [[CC]YY]MMDDhhmm[.SS] */
+ if ((p = strchr(arg, '.')) == NULL)
+ t->tm_sec = 0; /* Seconds defaults to 0. */
+ else {
+ if (strlen(p + 1) != 2)
+ goto terr;
+ *p++ = '\0';
+ t->tm_sec = ATOI2(p);
+ }
+
+ yearset = 0;
+ switch(strlen(arg)) {
+ case 12: /* CCYYMMDDhhmm */
+ t->tm_year = ATOI2(arg);
+ t->tm_year *= 100;
+ yearset = 1;
+ /* FALLTHROUGH */
+ case 10: /* YYMMDDhhmm */
+ if (yearset) {
+ yearset = ATOI2(arg);
+ t->tm_year += yearset;
+ } else {
+ yearset = ATOI2(arg);
+ if (yearset < 69)
+ t->tm_year = yearset + 2000;
+ else
+ t->tm_year = yearset + 1900;
+ }
+ t->tm_year -= 1900; /* Convert to UNIX time. */
+ /* FALLTHROUGH */
+ case 8: /* MMDDhhmm */
+ t->tm_mon = ATOI2(arg);
+ --t->tm_mon; /* Convert from 01-12 to 00-11 */
+ t->tm_mday = ATOI2(arg);
+ t->tm_hour = ATOI2(arg);
+ t->tm_min = ATOI2(arg);
+ break;
+ default:
+ goto terr;
+ }
+
+ t->tm_isdst = -1; /* Figure out DST. */
+
+ *timep = mktime(t);
+ if (*timep == -1) {
+terr: fprintf(stderr,
+ "%s: out of range or illegal time specification: [[CC]YY]MMDDhhmm[.SS]",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
+
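+/*
+ * Editor's note -- illustrative only, not part of the original source.
+ * Example of the [[CC]YY]MMDDhhmm[.SS] form accepted by -t and parsed
+ * above with ATOI2: "200110150347.21" is read as CC=20, YY=01, MM=10,
+ * DD=15, hh=03, mm=47, SS=21, i.e. 2001-10-15 03:47:21 local time,
+ * which mktime() then converts to seconds since the Epoch.  The string
+ * must be writable because the parser splits it at the '.'.
+ */
+#ifdef DB_RECOVER_EXAMPLE
+static int
+db_recover_timestamp_example(timep)
+	time_t *timep;
+{
+	char spec[] = "200110150347.21";
+
+	return (db_recover_read_timestamp("example", spec, timep));
+}
+#endif
+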
+int
+db_recover_usage()
+{
+ (void)fprintf(stderr,
+ "usage: db_recover [-ceVv] [-h home] [-t [[CC]YY]MMDDhhmm[.SS]]\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_recover_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/db/build_vxworks/db_recover/db_recover.wpj b/db/build_vxworks/db_recover/db_recover.wpj
new file mode 100644
index 000000000..c7752ec5b
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_recover.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_recover.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_recover.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_recover.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_recover.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> FILE_db_recover.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_recover.c
+<END>
+
+<BEGIN> userComments
+db_recover
+<END>
diff --git a/db/build_vxworks/db_recover/db_recover/Makefile.component b/db/build_vxworks/db_recover/db_recover/Makefile.component
new file mode 100644
index 000000000..355683201
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_recover
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_recover.o \
+ compConfig.o
+COMPONENT_OBJS = db_recover.o
+DEPENDENCY_FILES = db_recover.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_recover.o \
+ compConfig.o
+COMPONENT_OBJS = db_recover.o
+DEPENDENCY_FILES = db_recover.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_recover.o \
+ compConfig.o
+COMPONENT_OBJS = db_recover.o
+DEPENDENCY_FILES = db_recover.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_recover.o \
+ compConfig.o
+COMPONENT_OBJS = db_recover.o
+DEPENDENCY_FILES = db_recover.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_recover.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_recover modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_recover.c
+ $(COMPILE_TRADITIONAL) db_recover_ctdt.c -o db_recover_ctdt.o
+ $(LD) -r -o db_recover.tmp $@ db_recover_ctdt.o
+ $(RM) $@
+ $(MV) db_recover.tmp $@
+#
+# Adds entry point table section to db_recover component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_recover modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_recover.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_recover.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_recover.o: >> $@
+ $(NM) db_recover.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_recover.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_recover/db_recover/Makefile.custom b/db/build_vxworks/db_recover/db_recover/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_recover/db_recover/component.cdf b/db/build_vxworks/db_recover/db_recover/component.cdf
new file mode 100644
index 000000000..d322bf4a8
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_RECOVER {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_recover.o
+ NAME db_recover
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_recover.o {
+
+ NAME db_recover.o
+ SRC_PATH_NAME $PRJ_DIR/../db_recover.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_recover/db_recover/component.wpj b/db/build_vxworks/db_recover/db_recover/component.wpj
new file mode 100644
index 000000000..169a5b543
--- /dev/null
+++ b/db/build_vxworks/db_recover/db_recover/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_objects
+db_recover.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_recover.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_recover.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_stat/db_stat.c b/db/build_vxworks/db_stat/db_stat.c
new file mode 100644
index 000000000..dbfc35743
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat.c
@@ -0,0 +1,1085 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_stat.c,v 11.81 2001/10/11 18:56:35 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_shash.h"
+#include "lock.h"
+#include "mp.h"
+#include "clib_ext.h"
+
+#define PCT(f, t, pgsize) \
+ ((t) == 0 ? 0 : \
+ (((double)(((t) * (pgsize)) - (f)) / ((t) * (pgsize))) * 100))
+
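+/*
+ * Editor's note -- illustrative only, not part of the original source.
+ * PCT() reports how full a set of pages is: with t = 100 pages of
+ * pgsize = 4096 bytes and f = 40960 free bytes, the used fraction is
+ * (100 * 4096 - 40960) / (100 * 4096) = 0.9, so PCT() evaluates to 90
+ * and the " (90% ff)" suffixes printed below mean 90% fill factor.
+ */
+#ifdef DB_STAT_EXAMPLE
+static double
+db_stat_pct_example()
+{
+	return (PCT(40960, 100, 4096));		/* 90.0 */
+}
+#endif
+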
+typedef enum { T_NOTSET, T_DB, T_ENV, T_LOCK, T_LOG, T_MPOOL, T_TXN } test_t;
+
+int db_stat_argcheck __P((char *, const char *));
+int db_stat_btree_stats __P((DB_ENV *, DB *, DB_BTREE_STAT *, int));
+int db_stat_db_init __P((DB_ENV *, char *, test_t));
+void db_stat_dl __P((const char *, u_long));
+void db_stat_dl_bytes __P((const char *, u_long, u_long, u_long));
+int db_stat_env_stats __P((DB_ENV *, u_int32_t));
+int db_stat_hash_stats __P((DB_ENV *, DB *, int));
+int db_stat_lock_stats __P((DB_ENV *, char *, u_int32_t));
+int db_stat_log_stats __P((DB_ENV *, u_int32_t));
+int db_stat_main __P((int, char *[]));
+int db_stat_mpool_stats __P((DB_ENV *, char *, u_int32_t));
+void db_stat_prflags __P((u_int32_t, const FN *));
+int db_stat_queue_stats __P((DB_ENV *, DB *, int));
+int db_stat_txn_compare __P((const void *, const void *));
+int db_stat_txn_stats __P((DB_ENV *, u_int32_t));
+int db_stat_usage __P((void));
+int db_stat_version_check __P((const char *));
+
+int
+db_stat(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_stat", args, &argc, &argv);
+ return (db_stat_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_stat_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_stat";
+ DB_ENV *dbenv;
+ DB_BTREE_STAT *sp;
+ DB *alt_dbp, *dbp;
+ test_t ttype;
+ int ch, checked, d_close, e_close, exitval, fast, flags, nflag, ret;
+ char *db, *home, *internal, *subdb;
+
+ if ((ret = db_stat_version_check(progname)) != 0)
+ return (ret);
+
+ dbp = NULL;
+ ttype = T_NOTSET;
+ nflag = 0;
+ fast = 0;
+ d_close = e_close = exitval = 0;
+ db = home = internal = subdb = NULL;
+ flags = 0;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "C:cd:efh:lM:mNs:tVZ")) != EOF)
+ switch (ch) {
+ case 'C':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ if (!db_stat_argcheck(internal = optarg, "Acflmo"))
+ return (db_stat_usage());
+ break;
+ case 'c':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOCK;
+ break;
+ case 'd':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ db = optarg;
+ break;
+ case 'e':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_ENV;
+ break;
+ case 'f':
+ fast = DB_FAST_STAT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'l':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_LOG;
+ break;
+ case 'M':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ if (!db_stat_argcheck(internal = optarg, "Ahlm"))
+ return (db_stat_usage());
+ break;
+ case 'm':
+ if (ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_MPOOL;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 's':
+ if (ttype != T_DB && ttype != T_NOTSET)
+ goto argcombo;
+ ttype = T_DB;
+ subdb = optarg;
+ break;
+ case 't':
+ if (ttype != T_NOTSET) {
+argcombo: fprintf(stderr,
+ "%s: illegal option combination\n",
+ progname);
+ return (EXIT_FAILURE);
+ }
+ ttype = T_TXN;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case 'Z':
+ flags |= DB_STAT_CLEAR;
+ break;
+ case '?':
+ default:
+ return (db_stat_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ switch (ttype) {
+ case T_DB:
+ if (db == NULL)
+ return (db_stat_usage());
+ break;
+ case T_NOTSET:
+ return (db_stat_usage());
+ /* NOTREACHED */
+ default:
+ if (fast != 0)
+ return (db_stat_usage());
+ break;
+ }
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_env_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 0)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ /* Initialize the environment. */
+ if (db_stat_db_init(dbenv, home, ttype) != 0)
+ goto shutdown;
+
+ switch (ttype) {
+ case T_DB:
+ /* Create the DB object and open the file. */
+ if (flags != 0)
+ return (db_stat_usage());
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+
+ if ((ret =
+ dbp->open(dbp, db, subdb, DB_UNKNOWN, DB_RDONLY, 0)) != 0) {
+ dbp->err(dbp, ret, "open: %s", db);
+ goto shutdown;
+ }
+
+ /*
+ * See if we can open this db read/write to update counts.
+		 * If it's a master db then we cannot.  So check to see:
+		 * if it's a btree, then it might be.
+ */
+ checked = 0;
+ if (subdb == NULL && dbp->type == DB_BTREE) {
+ if ((ret =
+ dbp->stat(dbp, &sp, DB_FAST_STAT)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (EXIT_FAILURE);
+ }
+ checked = 1;
+ }
+
+ if (subdb != NULL ||
+ dbp->type != DB_BTREE ||
+ (sp->bt_metaflags & BTM_SUBDB) == 0) {
+ if ((ret = db_create(&alt_dbp, dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_create");
+ goto shutdown;
+ }
+ if ((ret = dbp->open(alt_dbp,
+ db, subdb, DB_UNKNOWN, 0, 0)) == 0) {
+ (void)dbp->close(dbp, 0);
+ dbp = alt_dbp;
+ }
+ /* Need to run again to update counts */
+ checked = 0;
+ }
+
+ d_close = 1;
+ switch (dbp->type) {
+ case DB_BTREE:
+ case DB_RECNO:
+ if (db_stat_btree_stats(
+ dbenv, dbp, checked == 1 ? sp : NULL, fast))
+ goto shutdown;
+ break;
+ case DB_HASH:
+ if (db_stat_hash_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_QUEUE:
+ if (db_stat_queue_stats(dbenv, dbp, fast))
+ goto shutdown;
+ break;
+ case DB_UNKNOWN:
+ abort(); /* Impossible. */
+ /* NOTREACHED */
+ }
+ break;
+ case T_ENV:
+ if (db_stat_env_stats(dbenv, flags))
+ exitval = 1;
+ break;
+ case T_LOCK:
+ if (db_stat_lock_stats(dbenv, internal, flags))
+ exitval = 1;
+ break;
+ case T_LOG:
+ if (db_stat_log_stats(dbenv, flags))
+ exitval = 1;
+ break;
+ case T_MPOOL:
+ if (db_stat_mpool_stats(dbenv, internal, flags))
+ exitval = 1;
+ break;
+ case T_TXN:
+ if (db_stat_txn_stats(dbenv, flags))
+ exitval = 1;
+ break;
+ case T_NOTSET:
+ abort(); /* Impossible. */
+ /* NOTREACHED */
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (d_close && (ret = dbp->close(dbp, 0)) != 0) {
+ exitval = 1;
+ dbenv->err(dbenv, ret, "close");
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+/*
+ * env_stats --
+ * Display environment statistics.
+ */
+int
+db_stat_env_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ REGENV renv;
+ REGION *rp, regs[1024];
+ int n, ret;
+ const char *lable;
+
+ n = sizeof(regs) / sizeof(regs[0]);
+ if ((ret = __db_e_stat(dbenv, &renv, regs, &n, flags)) != 0) {
+ dbenv->err(dbenv, ret, "__db_e_stat");
+ return (1);
+ }
+
+ printf("%d.%d.%d\tEnvironment version.\n",
+ renv.majver, renv.minver, renv.patch);
+ printf("%lx\tMagic number.\n", (u_long)renv.magic);
+ printf("%d\tPanic value.\n", renv.envpanic);
+
+ /* Adjust the reference count for us... */
+ printf("%d\tReferences.\n", renv.refcnt - 1);
+
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)renv.mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)renv.mutex.mutex_set_wait);
+
+ while (n > 0) {
+ printf("%s\n", DB_LINE);
+ rp = &regs[--n];
+ switch (rp->type) {
+ case REGION_TYPE_ENV:
+ lable = "Environment";
+ break;
+ case REGION_TYPE_LOCK:
+ lable = "Lock";
+ break;
+ case REGION_TYPE_LOG:
+ lable = "Log";
+ break;
+ case REGION_TYPE_MPOOL:
+ lable = "Mpool";
+ break;
+ case REGION_TYPE_MUTEX:
+ lable = "Mutex";
+ break;
+ case REGION_TYPE_TXN:
+ lable = "Txn";
+ break;
+ case INVALID_REGION_TYPE:
+ default:
+ lable = "Invalid";
+ break;
+ }
+ printf("%s Region: %d.\n", lable, rp->id);
+ db_stat_dl_bytes("Size", (u_long)0, (u_long)0, (u_long)rp->size);
+ printf("%ld\tSegment ID.\n", rp->segid);
+ db_stat_dl("Locks granted without waiting.\n",
+ (u_long)rp->mutex.mutex_set_nowait);
+ db_stat_dl("Locks granted after waiting.\n",
+ (u_long)rp->mutex.mutex_set_wait);
+ }
+
+ return (0);
+}
+
+/*
+ * btree_stats --
+ * Display btree/recno statistics.
+ */
+int
+db_stat_btree_stats(dbenv, dbp, msp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ DB_BTREE_STAT *msp;
+ int fast;
+{
+ static const FN fn[] = {
+ { BTM_DUP, "duplicates" },
+ { BTM_FIXEDLEN, "fixed-length" },
+ { BTM_RECNO, "recno" },
+ { BTM_RECNUM, "record-numbers" },
+ { BTM_RENUMBER, "renumber" },
+ { BTM_SUBDB, "multiple-databases" },
+ { 0, NULL }
+ };
+ DB_BTREE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if (msp != NULL)
+ sp = msp;
+ else if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tBtree magic number.\n", (u_long)sp->bt_magic);
+ printf("%lu\tBtree version number.\n", (u_long)sp->bt_version);
+ db_stat_prflags(sp->bt_metaflags, fn);
+ if (dbp->type == DB_BTREE) {
+#ifdef NOT_IMPLEMENTED
+ db_stat_dl("Maximum keys per-page.\n", (u_long)sp->bt_maxkey);
+#endif
+ db_stat_dl("Minimum keys per-page.\n", (u_long)sp->bt_minkey);
+ }
+ if (dbp->type == DB_RECNO) {
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->bt_re_len);
+ if (isprint(sp->bt_re_pad) && !isspace(sp->bt_re_pad))
+ printf("%c\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n",
+ (int)sp->bt_re_pad);
+ }
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->bt_pagesize);
+ db_stat_dl("Number of levels in the tree.\n", (u_long)sp->bt_levels);
+ db_stat_dl(dbp->type == DB_BTREE ?
+ "Number of unique keys in the tree.\n" :
+ "Number of records in the tree.\n", (u_long)sp->bt_nkeys);
+ db_stat_dl("Number of data items in the tree.\n", (u_long)sp->bt_ndata);
+
+ db_stat_dl("Number of tree internal pages.\n", (u_long)sp->bt_int_pg);
+ db_stat_dl("Number of bytes free in tree internal pages",
+ (u_long)sp->bt_int_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_int_pgfree, sp->bt_int_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree leaf pages.\n", (u_long)sp->bt_leaf_pg);
+ db_stat_dl("Number of bytes free in tree leaf pages",
+ (u_long)sp->bt_leaf_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_leaf_pgfree, sp->bt_leaf_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree duplicate pages.\n", (u_long)sp->bt_dup_pg);
+ db_stat_dl("Number of bytes free in tree duplicate pages",
+ (u_long)sp->bt_dup_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_dup_pgfree, sp->bt_dup_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of tree overflow pages.\n", (u_long)sp->bt_over_pg);
+ db_stat_dl("Number of bytes free in tree overflow pages",
+ (u_long)sp->bt_over_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->bt_over_pgfree, sp->bt_over_pg, sp->bt_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->bt_free);
+
+ return (0);
+}
+
+/*
+ * hash_stats --
+ * Display hash statistics.
+ */
+int
+db_stat_hash_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ static const FN fn[] = {
+ { DB_HASH_DUP, "duplicates" },
+ { DB_HASH_SUBDB,"multiple-databases" },
+ { 0, NULL }
+ };
+ DB_HASH_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tHash magic number.\n", (u_long)sp->hash_magic);
+ printf("%lu\tHash version number.\n", (u_long)sp->hash_version);
+ db_stat_prflags(sp->hash_metaflags, fn);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->hash_pagesize);
+ db_stat_dl("Specified number of elements.\n", (u_long)sp->hash_nelem);
+ db_stat_dl("Specified fill factor.\n", (u_long)sp->hash_ffactor);
+ db_stat_dl("Number of keys in the database.\n", (u_long)sp->hash_nkeys);
+ db_stat_dl("Number of data items in the database.\n", (u_long)sp->hash_ndata);
+
+ db_stat_dl("Number of hash buckets.\n", (u_long)sp->hash_buckets);
+ db_stat_dl("Number of bytes free on bucket pages", (u_long)sp->hash_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_bfree, sp->hash_buckets, sp->hash_pagesize));
+
+ db_stat_dl("Number of overflow pages.\n", (u_long)sp->hash_bigpages);
+ db_stat_dl("Number of bytes free in overflow pages",
+ (u_long)sp->hash_big_bfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_big_bfree, sp->hash_bigpages, sp->hash_pagesize));
+
+ db_stat_dl("Number of bucket overflow pages.\n", (u_long)sp->hash_overflows);
+ db_stat_dl("Number of bytes free in bucket overflow pages",
+ (u_long)sp->hash_ovfl_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_ovfl_free, sp->hash_overflows, sp->hash_pagesize));
+
+ db_stat_dl("Number of duplicate pages.\n", (u_long)sp->hash_dup);
+ db_stat_dl("Number of bytes free in duplicate pages",
+ (u_long)sp->hash_dup_free);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->hash_dup_free, sp->hash_dup, sp->hash_pagesize));
+
+ db_stat_dl("Number of pages on the free list.\n", (u_long)sp->hash_free);
+
+ return (0);
+}
+
+/*
+ * queue_stats --
+ * Display queue statistics.
+ */
+int
+db_stat_queue_stats(dbenv, dbp, fast)
+ DB_ENV *dbenv;
+ DB *dbp;
+ int fast;
+{
+ DB_QUEUE_STAT *sp;
+ int ret;
+
+ COMPQUIET(dbenv, NULL);
+
+ if ((ret = dbp->stat(dbp, &sp, fast)) != 0) {
+ dbp->err(dbp, ret, "dbp->stat");
+ return (1);
+ }
+
+ printf("%lx\tQueue magic number.\n", (u_long)sp->qs_magic);
+ printf("%lu\tQueue version number.\n", (u_long)sp->qs_version);
+ db_stat_dl("Fixed-length record size.\n", (u_long)sp->qs_re_len);
+ if (isprint(sp->qs_re_pad) && !isspace(sp->qs_re_pad))
+ printf("%c\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ else
+ printf("0x%x\tFixed-length record pad.\n", (int)sp->qs_re_pad);
+ db_stat_dl("Underlying database page size.\n", (u_long)sp->qs_pagesize);
+ if (sp->qs_extentsize != 0)
+ db_stat_dl("Underlying database extent size.\n",
+ (u_long)sp->qs_extentsize);
+ db_stat_dl("Number of records in the database.\n", (u_long)sp->qs_nkeys);
+ db_stat_dl("Number of database pages.\n", (u_long)sp->qs_pages);
+ db_stat_dl("Number of bytes free in database pages", (u_long)sp->qs_pgfree);
+ printf(" (%.0f%% ff).\n",
+ PCT(sp->qs_pgfree, sp->qs_pages, sp->qs_pagesize));
+ printf("%lu\tFirst undeleted record.\n", (u_long)sp->qs_first_recno);
+ printf(
+ "%lu\tLast allocated record number.\n", (u_long)sp->qs_cur_recno);
+
+ return (0);
+}
+
+/*
+ * lock_stats --
+ * Display lock statistics.
+ */
+int
+db_stat_lock_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_LOCK_STAT *sp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->lock_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->lock_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ printf("%lu Last allocated locker ID.\n", (u_long)sp->st_lastid);
+ db_stat_dl("Number of lock modes.\n", (u_long)sp->st_nmodes);
+ db_stat_dl("Maximum number of locks possible.\n", (u_long)sp->st_maxlocks);
+ db_stat_dl("Maximum number of lockers possible.\n", (u_long)sp->st_maxlockers);
+ db_stat_dl("Maximum number of objects possible.\n", (u_long)sp->st_maxobjects);
+ db_stat_dl("Current locks.\n", (u_long)sp->st_nlocks);
+ db_stat_dl("Maximum number of locks so far.\n", (u_long)sp->st_maxnlocks);
+ db_stat_dl("Current number of lockers.\n", (u_long)sp->st_nlockers);
+ db_stat_dl("Maximum number lockers so far.\n", (u_long)sp->st_maxnlockers);
+ db_stat_dl("Current number lock objects.\n", (u_long)sp->st_nobjects);
+ db_stat_dl("Maximum number of lock objects so far.\n",
+ (u_long)sp->st_maxnobjects);
+ db_stat_dl("Number of lock requests.\n", (u_long)sp->st_nrequests);
+ db_stat_dl("Number of lock releases.\n", (u_long)sp->st_nreleases);
+ db_stat_dl("Number of lock requests that would have waited.\n",
+ (u_long)sp->st_nnowaits);
+ db_stat_dl("Number of lock conflicts.\n", (u_long)sp->st_nconflicts);
+ db_stat_dl("Number of deadlocks.\n", (u_long)sp->st_ndeadlocks);
+ db_stat_dl("Number of transaction timeouts.\n", (u_long)sp->st_ntxntimeouts);
+ db_stat_dl("Number of lock timeouts.\n", (u_long)sp->st_nlocktimeouts);
+
+ db_stat_dl_bytes("Lock region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ return (0);
+}
+
+/*
+ * log_stats --
+ * Display log statistics.
+ */
+int
+db_stat_log_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_LOG_STAT *sp;
+ int ret;
+
+ if ((ret = dbenv->log_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ printf("%lx\tLog magic number.\n", (u_long)sp->st_magic);
+ printf("%lu\tLog version number.\n", (u_long)sp->st_version);
+ db_stat_dl_bytes("Log record cache size",
+ (u_long)0, (u_long)0, (u_long)sp->st_lg_bsize);
+ printf("%#o\tLog file mode.\n", sp->st_mode);
+ if (sp->st_lg_max % MEGABYTE == 0)
+ printf("%luMb\tLog file size.\n",
+ (u_long)sp->st_lg_max / MEGABYTE);
+ else if (sp->st_lg_max % 1024 == 0)
+ printf("%luKb\tLog file size.\n", (u_long)sp->st_lg_max / 1024);
+ else
+ printf("%lu\tLog file size.\n", (u_long)sp->st_lg_max);
+ db_stat_dl_bytes("Log bytes written",
+ (u_long)0, (u_long)sp->st_w_mbytes, (u_long)sp->st_w_bytes);
+ db_stat_dl_bytes("Log bytes written since last checkpoint",
+ (u_long)0, (u_long)sp->st_wc_mbytes, (u_long)sp->st_wc_bytes);
+ db_stat_dl("Total log file writes.\n", (u_long)sp->st_wcount);
+ db_stat_dl("Total log file write due to overflow.\n",
+ (u_long)sp->st_wcount_fill);
+ db_stat_dl("Total log file flushes.\n", (u_long)sp->st_scount);
+ printf("%lu\tCurrent log file number.\n", (u_long)sp->st_cur_file);
+ printf("%lu\tCurrent log file offset.\n", (u_long)sp->st_cur_offset);
+ printf("%lu\tOn-disk log file number.\n", (u_long)sp->st_disk_file);
+ printf("%lu\tOn-disk log file offset.\n", (u_long)sp->st_disk_offset);
+
+ db_stat_dl("Max commits in a log flush.\n", (u_long)sp->st_maxcommitperflush);
+ db_stat_dl("Min commits in a log flush.\n", (u_long)sp->st_mincommitperflush);
+ db_stat_dl("Number of log flushes containing a transaction commit.\n",
+ (u_long)sp->st_flushcommit);
+
+ db_stat_dl_bytes("Log region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ return (0);
+}
+
+/*
+ * mpool_stats --
+ * Display mpool statistics.
+ */
+int
+db_stat_mpool_stats(dbenv, internal, flags)
+ DB_ENV *dbenv;
+ char *internal;
+ u_int32_t flags;
+{
+ DB_MPOOL_FSTAT **fsp;
+ DB_MPOOL_STAT *gsp;
+ int ret;
+
+ if (internal != NULL) {
+ if ((ret =
+ dbenv->memp_dump_region(dbenv, internal, stdout)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+ return (0);
+ }
+
+ if ((ret = dbenv->memp_stat(dbenv, &gsp, &fsp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ db_stat_dl_bytes("Total cache size",
+ (u_long)gsp->st_gbytes, (u_long)0, (u_long)gsp->st_bytes);
+ db_stat_dl("Number of caches.\n", (u_long)gsp->st_ncache);
+ db_stat_dl("Pool individual cache size.\n", (u_long)gsp->st_regsize);
+ db_stat_dl("Requested pages found in the cache", (u_long)gsp->st_cache_hit);
+ if (gsp->st_cache_hit + gsp->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)gsp->st_cache_hit /
+ (gsp->st_cache_hit + gsp->st_cache_miss)) * 100);
+ printf(".\n");
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)gsp->st_map);
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)gsp->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n", (u_long)gsp->st_page_create);
+ db_stat_dl("Pages read into the cache.\n", (u_long)gsp->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)gsp->st_page_out);
+ db_stat_dl("Clean pages forced from the cache.\n",
+ (u_long)gsp->st_ro_evict);
+ db_stat_dl("Dirty pages forced from the cache.\n",
+ (u_long)gsp->st_rw_evict);
+ db_stat_dl("Dirty buffers written by trickle-sync thread.\n",
+ (u_long)gsp->st_page_trickle);
+ db_stat_dl("Current clean buffer count.\n",
+ (u_long)gsp->st_page_clean);
+ db_stat_dl("Current dirty buffer count.\n",
+ (u_long)gsp->st_page_dirty);
+ db_stat_dl("Number of hash buckets used for page location.\n",
+ (u_long)gsp->st_hash_buckets);
+ db_stat_dl("Total number of times hash chains searched for a page.\n",
+ (u_long)gsp->st_hash_searches);
+ db_stat_dl("The longest hash chain searched for a page.\n",
+ (u_long)gsp->st_hash_longest);
+ db_stat_dl("Total number of hash buckets examined for page location.\n",
+ (u_long)gsp->st_hash_examined);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)gsp->st_region_nowait);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)gsp->st_region_wait);
+
+ for (; fsp != NULL && *fsp != NULL; ++fsp) {
+ printf("%s\n", DB_LINE);
+ printf("Pool File: %s\n", (*fsp)->file_name);
+ db_stat_dl("Page size.\n", (u_long)(*fsp)->st_pagesize);
+ db_stat_dl("Requested pages found in the cache",
+ (u_long)(*fsp)->st_cache_hit);
+ if ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss != 0)
+ printf(" (%.0f%%)", ((double)(*fsp)->st_cache_hit /
+ ((*fsp)->st_cache_hit + (*fsp)->st_cache_miss)) *
+ 100);
+ printf(".\n");
+ db_stat_dl("Requested pages mapped into the process' address space.\n",
+ (u_long)(*fsp)->st_map);
+ db_stat_dl("Requested pages not found in the cache.\n",
+ (u_long)(*fsp)->st_cache_miss);
+ db_stat_dl("Pages created in the cache.\n",
+ (u_long)(*fsp)->st_page_create);
+ db_stat_dl("Pages read into the cache.\n",
+ (u_long)(*fsp)->st_page_in);
+ db_stat_dl("Pages written from the cache to the backing file.\n",
+ (u_long)(*fsp)->st_page_out);
+ }
+
+ return (0);
+}
+
+/*
+ * txn_stats --
+ * Display transaction statistics.
+ */
+int
+db_stat_txn_stats(dbenv, flags)
+ DB_ENV *dbenv;
+ u_int32_t flags;
+{
+ DB_TXN_STAT *sp;
+ u_int32_t i;
+ int ret;
+ const char *p;
+
+ if ((ret = dbenv->txn_stat(dbenv, &sp, flags)) != 0) {
+ dbenv->err(dbenv, ret, NULL);
+ return (1);
+ }
+
+ p = sp->st_last_ckp.file == 0 ?
+ "No checkpoint LSN." : "File/offset for last checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_last_ckp.file, (u_long)sp->st_last_ckp.offset, p);
+ p = sp->st_pending_ckp.file == 0 ?
+ "No pending checkpoint LSN." :
+ "File/offset for last pending checkpoint LSN.";
+ printf("%lu/%lu\t%s\n",
+ (u_long)sp->st_pending_ckp.file,
+ (u_long)sp->st_pending_ckp.offset, p);
+ if (sp->st_time_ckp == 0)
+ printf("0\tNo checkpoint timestamp.\n");
+ else
+ printf("%.24s\tCheckpoint timestamp.\n",
+ ctime(&sp->st_time_ckp));
+ printf("%lx\tLast transaction ID allocated.\n",
+ (u_long)sp->st_last_txnid);
+ db_stat_dl("Maximum number of active transactions possible.\n",
+ (u_long)sp->st_maxtxns);
+ db_stat_dl("Active transactions.\n", (u_long)sp->st_nactive);
+ db_stat_dl("Maximum active transactions.\n", (u_long)sp->st_maxnactive);
+ db_stat_dl("Number of transactions begun.\n", (u_long)sp->st_nbegins);
+ db_stat_dl("Number of transactions aborted.\n", (u_long)sp->st_naborts);
+ db_stat_dl("Number of transactions committed.\n", (u_long)sp->st_ncommits);
+ db_stat_dl("Number of transactions restored.\n", (u_long)sp->st_nrestores);
+
+ db_stat_dl_bytes("Transaction region size",
+ (u_long)0, (u_long)0, (u_long)sp->st_regsize);
+ db_stat_dl("The number of region locks granted after waiting.\n",
+ (u_long)sp->st_region_wait);
+ db_stat_dl("The number of region locks granted without waiting.\n",
+ (u_long)sp->st_region_nowait);
+
+ qsort(sp->st_txnarray,
+ sp->st_nactive, sizeof(sp->st_txnarray[0]), db_stat_txn_compare);
+ for (i = 0; i < sp->st_nactive; ++i) {
+ printf("\tid: %lx; initial LSN file/offset %lu/%lu",
+ (u_long)sp->st_txnarray[i].txnid,
+ (u_long)sp->st_txnarray[i].lsn.file,
+ (u_long)sp->st_txnarray[i].lsn.offset);
+ if (sp->st_txnarray[i].parentid == 0)
+ printf("\n");
+ else
+ printf(" parent: %lx\n",
+ (u_long)sp->st_txnarray[i].parentid);
+ }
+
+ return (0);
+}
+
+int
+db_stat_txn_compare(a1, b1)
+ const void *a1, *b1;
+{
+ const DB_TXN_ACTIVE *a, *b;
+
+ a = a1;
+ b = b1;
+
+ if (a->txnid > b->txnid)
+ return (1);
+ if (a->txnid < b->txnid)
+ return (-1);
+ return (0);
+}
+
+/*
+ * dl --
+ * Display a big value.
+ */
+void
+db_stat_dl(msg, value)
+ const char *msg;
+ u_long value;
+{
+ /*
+ * Two formats: if less than 10 million, display as the number, if
+ * greater than 10 million display as ###M.
+ */
+ if (value < 10000000)
+ printf("%lu\t%s", value, msg);
+ else
+ printf("%luM\t%s", value / 1000000, msg);
+}
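+
+/*
+ * For illustration only (not part of the original source): under the rule
+ * above, db_stat_dl("Locks granted.\n", 42) prints "42\tLocks granted.",
+ * while db_stat_dl("Locks granted.\n", 123456789) prints
+ * "123M\tLocks granted."; counters of ten million or more are shown
+ * rounded down to whole millions.
+ */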
+
+/*
+ * dl_bytes --
+ * Display a big number of bytes.
+ */
+void
+db_stat_dl_bytes(msg, gbytes, mbytes, bytes)
+ const char *msg;
+ u_long gbytes, mbytes, bytes;
+{
+ const char *sep;
+ u_long sbytes;
+ int showbytes;
+
+ sbytes = bytes;
+ while (bytes > MEGABYTE) {
+ ++mbytes;
+ bytes -= MEGABYTE;
+ }
+ while (mbytes >= GIGABYTE / MEGABYTE) {
+ ++gbytes;
+ mbytes -= GIGABYTE / MEGABYTE;
+ }
+
+ sep = "";
+ showbytes = 0;
+ if (gbytes > 0) {
+ printf("%luGB", gbytes);
+ sep = " ";
+ showbytes = 1;
+ }
+ if (mbytes > 0) {
+ printf("%s%luMB", sep, mbytes);
+ sep = " ";
+ showbytes = 1;
+ }
+ if (bytes > 1024) {
+ printf("%s%luKB", sep, bytes / 1024);
+ bytes %= 1024;
+ sep = " ";
+ showbytes = 1;
+ }
+ if (bytes > 0)
+ printf("%s%luB", sep, bytes);
+ else
+ if (!showbytes)
+ printf("%s%lu", sep, bytes);
+
+ printf("\t%s", msg);
+ if (showbytes)
+ printf(" (%lu bytes)", sbytes);
+ printf(".\n");
+}
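+
+/*
+ * A worked example (illustrative only, not from the original source):
+ * db_stat_dl_bytes("Cache size", 0, 0, 1183778) normalizes 1183778 bytes
+ * into 1MB + 132KB + 34B and prints
+ *
+ *	1MB 132KB 34B\tCache size (1183778 bytes).
+ *
+ * The "(... bytes)" suffix is appended whenever any GB/MB/KB field was shown.
+ */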
+
+/*
+ * prflags --
+ * Print out flag values.
+ */
+void
+db_stat_prflags(flags, fnp)
+ u_int32_t flags;
+ const FN *fnp;
+{
+ const char *sep;
+
+ sep = "\t";
+ printf("Flags:");
+ for (; fnp->mask != 0; ++fnp)
+ if (fnp->mask & flags) {
+ printf("%s%s", sep, fnp->name);
+ sep = ", ";
+ }
+ printf("\n");
+}
+
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+int
+db_stat_db_init(dbenv, home, ttype)
+ DB_ENV *dbenv;
+ char *home;
+ test_t ttype;
+{
+ int ret;
+
+ /*
+ * If our environment open fails, and we're trying to look at a
+ * shared region, it's a hard failure.
+ *
+ * We will probably just drop core if the environment we join does
+ * not include a memory pool. This is probably acceptable; trying
+ * to use an existing environment that does not contain a memory
+ * pool to look at a database can be safely construed as operator
+ * error, I think.
+ */
+ if ((ret =
+ dbenv->open(dbenv, home, DB_JOINENV | DB_USE_ENVIRON, 0)) == 0)
+ return (0);
+ if (ttype != T_DB && ttype != T_LOG) {
+ dbenv->err(dbenv, ret, "DB_ENV->open%s%s",
+ home == NULL ? "" : ": ", home == NULL ? "" : home);
+ return (1);
+ }
+
+ /*
+ * We're looking at a database or set of log files and no environment
+ * exists. Create one, but make it private so no files are actually
+ * created.
+ *
+ * An environment is required to look at databases because we may be
+ * trying to look at databases in directories other than the current
+ * one.
+ */
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON,
+ 0)) == 0)
+ return (0);
+
+ /* An environment is required. */
+ dbenv->err(dbenv, ret, "open");
+ return (1);
+}
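+
+/*
+ * Usage sketch (illustrative only; "env_home" is a hypothetical path):
+ *
+ *	if (db_stat_db_init(dbenv, env_home, T_DB) != 0)
+ *		return (1);
+ *
+ * With ttype set to T_DB a failed DB_JOINENV open is not fatal; the function
+ * falls back to the private DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL
+ * environment so a plain database file can still be examined.
+ */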
+
+/*
+ * argcheck --
+ * Return if argument flags are okay.
+ */
+int
+db_stat_argcheck(arg, ok_args)
+ char *arg;
+ const char *ok_args;
+{
+ for (; *arg != '\0'; ++arg)
+ if (strchr(ok_args, *arg) == NULL)
+ return (0);
+ return (1);
+}
+
+int
+db_stat_usage()
+{
+ fprintf(stderr, "usage: db_stat %s\n",
+"[-celmNtVZ] [-C Acflmo]\n\t[-d file [-f] [-s database]] [-h home] [-M Ahlm]");
+ return (EXIT_FAILURE);
+}
+
+int
+db_stat_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/db/build_vxworks/db_stat/db_stat.wpj b/db/build_vxworks/db_stat/db_stat.wpj
new file mode 100644
index 000000000..c65db85ae
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_stat.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_stat.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_stat.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_stat.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_stat.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> FILE_db_stat.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_stat.c
+<END>
+
+<BEGIN> userComments
+db_stat
+<END>
diff --git a/db/build_vxworks/db_stat/db_stat/Makefile.component b/db/build_vxworks/db_stat/db_stat/Makefile.component
new file mode 100644
index 000000000..7b1723781
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_stat
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_stat.o \
+ compConfig.o
+COMPONENT_OBJS = db_stat.o
+DEPENDENCY_FILES = db_stat.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_stat.o \
+ compConfig.o
+COMPONENT_OBJS = db_stat.o
+DEPENDENCY_FILES = db_stat.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_stat.o \
+ compConfig.o
+COMPONENT_OBJS = db_stat.o
+DEPENDENCY_FILES = db_stat.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_stat.o \
+ compConfig.o
+COMPONENT_OBJS = db_stat.o
+DEPENDENCY_FILES = db_stat.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_stat.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_stat modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_stat_ctdt.c
+ $(COMPILE_TRADITIONAL) db_stat_ctdt.c -o db_stat_ctdt.o
+ $(LD) -r -o db_stat.tmp $@ db_stat_ctdt.o
+ $(RM) $@
+ $(MV) db_stat.tmp $@
+#
+# Adds entry point table section to db_stat component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_stat modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_stat.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_stat.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_stat.o: >> $@
+ $(NM) db_stat.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_stat.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_stat/db_stat/Makefile.custom b/db/build_vxworks/db_stat/db_stat/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_stat/db_stat/component.cdf b/db/build_vxworks/db_stat/db_stat/component.cdf
new file mode 100644
index 000000000..728544eab
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_STAT {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_stat.o
+ NAME db_stat
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_stat.o {
+
+ NAME db_stat.o
+ SRC_PATH_NAME $PRJ_DIR/../db_stat.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_stat/db_stat/component.wpj b/db/build_vxworks/db_stat/db_stat/component.wpj
new file mode 100644
index 000000000..34b3e2fc7
--- /dev/null
+++ b/db/build_vxworks/db_stat/db_stat/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_stat.c_objects
+db_stat.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_stat_DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_stat.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_upgrade/db_upgrade.c b/db/build_vxworks/db_upgrade/db_upgrade.c
new file mode 100644
index 000000000..ef62dc8bf
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade.c
@@ -0,0 +1,190 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_upgrade.c,v 1.22 2001/08/06 13:42:33 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "clib_ext.h"
+
+int db_upgrade_main __P((int, char *[]));
+int db_upgrade_usage __P((void));
+int db_upgrade_version_check __P((const char *));
+
+int
+db_upgrade(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_upgrade", args, &argc, &argv);
+ return (db_upgrade_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
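+
+/*
+ * Illustrative only: on a VxWorks target the entry point above receives a
+ * single argument string, which __db_util_arg() splits into an argc/argv
+ * pair for db_upgrade_main().  A hypothetical shell invocation (paths are
+ * examples, not from the original source) might be:
+ *
+ *	-> db_upgrade "-h /vxworks/dbhome mydb.db"
+ */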
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_upgrade_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ const char *progname = "db_upgrade";
+ DB *dbp;
+ DB_ENV *dbenv;
+ u_int32_t flags;
+ int ch, e_close, exitval, nflag, ret, t_ret;
+ char *home;
+
+ if ((ret = db_upgrade_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ flags = nflag = 0;
+ e_close = exitval = 0;
+ home = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NsV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 's':
+ LF_SET(DB_DUPSORT);
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_upgrade_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_upgrade_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+ if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ /*
+ * If attaching to a pre-existing environment fails, create a
+ * private one and try again.
+ */
+ if ((ret = dbenv->open(dbenv,
+ home, DB_JOINENV | DB_USE_ENVIRON, 0)) != 0 &&
+ (ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->upgrade(dbp, argv[0], flags)) != 0)
+ dbp->err(dbp, ret, "DB->upgrade: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+ dbenv->err(dbenv, ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_upgrade_usage()
+{
+ fprintf(stderr, "usage: db_upgrade [-NsV] [-h home] db_file ...\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_upgrade_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
diff --git a/db/build_vxworks/db_upgrade/db_upgrade.wpj b/db/build_vxworks/db_upgrade/db_upgrade.wpj
new file mode 100644
index 000000000..410b365a4
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_upgrade.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_upgrade.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_upgrade.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_upgrade.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> FILE_db_upgrade.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_upgrade.c
+<END>
+
+<BEGIN> userComments
+db_upgrade
+<END>
diff --git a/db/build_vxworks/db_upgrade/db_upgrade/Makefile.component b/db/build_vxworks/db_upgrade/db_upgrade/Makefile.component
new file mode 100644
index 000000000..71ae5e98e
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_upgrade
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_upgrade.o \
+ compConfig.o
+COMPONENT_OBJS = db_upgrade.o
+DEPENDENCY_FILES = db_upgrade.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_upgrade.o \
+ compConfig.o
+COMPONENT_OBJS = db_upgrade.o
+DEPENDENCY_FILES = db_upgrade.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_upgrade.o \
+ compConfig.o
+COMPONENT_OBJS = db_upgrade.o
+DEPENDENCY_FILES = db_upgrade.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_upgrade.o \
+ compConfig.o
+COMPONENT_OBJS = db_upgrade.o
+DEPENDENCY_FILES = db_upgrade.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_upgrade.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_upgrade modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_upgrade.c
+ $(COMPILE_TRADITIONAL) db_upgrade_ctdt.c -o db_upgrade_ctdt.o
+ $(LD) -r -o db_upgrade.tmp $@ db_upgrade_ctdt.o
+ $(RM) $@
+ $(MV) db_upgrade.tmp $@
+#
+# Adds entry point table section to db_upgrade component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_upgrade modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_upgrade.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_upgrade.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_upgrade.o: >> $@
+ $(NM) db_upgrade.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_upgrade.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_upgrade/db_upgrade/Makefile.custom b/db/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_upgrade/db_upgrade/component.cdf b/db/build_vxworks/db_upgrade/db_upgrade/component.cdf
new file mode 100644
index 000000000..7bbdebd49
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration.
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_UPGRADE {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_upgrade.o
+ NAME db_upgrade
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_upgrade.o {
+
+ NAME db_upgrade.o
+ SRC_PATH_NAME $PRJ_DIR/../db_upgrade.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_upgrade/db_upgrade/component.wpj b/db/build_vxworks/db_upgrade/db_upgrade/component.wpj
new file mode 100644
index 000000000..6e79d3b0e
--- /dev/null
+++ b/db/build_vxworks/db_upgrade/db_upgrade/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_objects
+db_upgrade.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_upgrade.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_upgrade.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/db_verify/db_verify.c b/db/build_vxworks/db_verify/db_verify.c
new file mode 100644
index 000000000..c7c0ec066
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify.c
@@ -0,0 +1,204 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char copyright[] =
+ "Copyright (c) 1996-2001\nSleepycat Software Inc. All rights reserved.\n";
+static const char revid[] =
+ "Id: db_verify.c,v 1.25 2001/10/09 18:20:32 bostic Exp ";
+#endif
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#include "db_int.h"
+#include "clib_ext.h"
+
+int db_verify_main __P((int, char *[]));
+int db_verify_usage __P((void));
+int db_verify_version_check __P((const char *));
+
+int
+db_verify(args)
+ char *args;
+{
+ int argc;
+ char **argv;
+
+ __db_util_arg("db_verify", args, &argc, &argv);
+ return (db_verify_main(argc, argv) ? EXIT_FAILURE : EXIT_SUCCESS);
+}
+
+#include <stdio.h>
+#define ERROR_RETURN ERROR
+
+int
+db_verify_main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind, __db_getopt_reset;
+ DB *dbp;
+ DB_ENV *dbenv;
+ const char *progname = "db_verify";
+ int ch, e_close, exitval, nflag, quiet, ret, t_ret;
+ char *home;
+
+ if ((ret = db_verify_version_check(progname)) != 0)
+ return (ret);
+
+ dbenv = NULL;
+ e_close = exitval = nflag = quiet = 0;
+ home = NULL;
+ __db_getopt_reset = 1;
+ while ((ch = getopt(argc, argv, "h:NqV")) != EOF)
+ switch (ch) {
+ case 'h':
+ home = optarg;
+ break;
+ case 'N':
+ nflag = 1;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'V':
+ printf("%s\n", db_version(NULL, NULL, NULL));
+ return (EXIT_SUCCESS);
+ case '?':
+ default:
+ return (db_verify_usage());
+ }
+ argc -= optind;
+ argv += optind;
+
+ if (argc <= 0)
+ return (db_verify_usage());
+
+ /* Handle possible interruptions. */
+ __db_util_siginit();
+
+ /*
+ * Create an environment object and initialize it for error
+ * reporting.
+ */
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_env_create: %s\n",
+ progname, db_strerror(ret));
+ goto shutdown;
+ }
+ e_close = 1;
+
+ /*
+ * XXX
+ * We'd prefer to have error output configured while calling
+ * db_env_create, but there's no way to turn it off once it's
+ * turned on.
+ */
+ if (!quiet) {
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, progname);
+ }
+
+ if (nflag) {
+ if ((ret = dbenv->set_flags(dbenv, DB_NOLOCKING, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOLOCKING");
+ goto shutdown;
+ }
+		if ((ret = dbenv->set_flags(dbenv, DB_NOPANIC, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_flags: DB_NOPANIC");
+ goto shutdown;
+ }
+ }
+
+ /*
+ * Attach to an mpool if it exists, but if that fails, attach to a
+ * private region. In the latter case, declare a reasonably large
+ * cache so that we don't fail when verifying large databases.
+ */
+ if ((ret =
+ dbenv->open(dbenv, home, DB_INIT_MPOOL | DB_USE_ENVIRON, 0)) != 0) {
+ if ((ret = dbenv->set_cachesize(dbenv, 0, MEGABYTE, 1)) != 0) {
+ dbenv->err(dbenv, ret, "set_cachesize");
+ goto shutdown;
+ }
+ if ((ret = dbenv->open(dbenv, home,
+ DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0) {
+ dbenv->err(dbenv, ret, "open");
+ goto shutdown;
+ }
+ }
+
+ for (; !__db_util_interrupted() && argv[0] != NULL; ++argv) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ goto shutdown;
+ }
+ if (!quiet) {
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ }
+ if ((ret = dbp->verify(dbp, argv[0], NULL, NULL, 0)) != 0)
+ dbp->err(dbp, ret, "DB->verify: %s", argv[0]);
+ if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0) {
+			dbenv->err(dbenv, t_ret, "DB->close: %s", argv[0]);
+ ret = t_ret;
+ }
+ if (ret != 0)
+ goto shutdown;
+ }
+
+ if (0) {
+shutdown: exitval = 1;
+ }
+ if (e_close && (ret = dbenv->close(dbenv, 0)) != 0) {
+ exitval = 1;
+ fprintf(stderr,
+ "%s: dbenv->close: %s\n", progname, db_strerror(ret));
+ }
+
+ /* Resend any caught signal. */
+ __db_util_sigresend();
+
+ return (exitval == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
+}
+
+int
+db_verify_usage()
+{
+ fprintf(stderr, "usage: db_verify [-NqV] [-h home] db_file ...\n");
+ return (EXIT_FAILURE);
+}
+
+int
+db_verify_version_check(progname)
+ const char *progname;
+{
+ int v_major, v_minor, v_patch;
+
+ /* Make sure we're loaded with the right version of the DB library. */
+ (void)db_version(&v_major, &v_minor, &v_patch);
+ if (v_major != DB_VERSION_MAJOR ||
+ v_minor != DB_VERSION_MINOR || v_patch != DB_VERSION_PATCH) {
+ fprintf(stderr,
+ "%s: version %d.%d.%d doesn't match library version %d.%d.%d\n",
+ progname, DB_VERSION_MAJOR, DB_VERSION_MINOR,
+ DB_VERSION_PATCH, v_major, v_minor, v_patch);
+ return (EXIT_FAILURE);
+ }
+ return (0);
+}
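
The loop in db_verify_main() reduces to the following minimal sketch, kept to calls that appear in db_verify.c above (db_env_create, DB_ENV->open, db_create, DB->verify and the close methods). It skips the tool's two-step environment open, signal handling and option parsing, goes straight to a private environment, and leaves error reporting to the caller; the helper name verify_one is illustrative only and is not part of the patch:

#include <db.h>

/*
 * verify_one --
 *	Sketch: verify a single database file in a private environment,
 *	mirroring the sequence db_verify_main() uses above.  The tool also
 *	bumps the cache with set_cachesize() before this open so that large
 *	databases verify; that step is omitted here.
 */
static int
verify_one(const char *home, const char *file)
{
	DB_ENV *dbenv;
	DB *dbp;
	int ret, t_ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (ret);
	if ((ret = dbenv->open(dbenv, home,
	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE | DB_USE_ENVIRON, 0)) != 0)
		goto err;
	if ((ret = db_create(&dbp, dbenv, 0)) != 0)
		goto err;
	/* NULL subdatabase and output file, no flags: same call as the tool. */
	ret = dbp->verify(dbp, file, NULL, NULL, 0);
	if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
		ret = t_ret;
err:	if ((t_ret = dbenv->close(dbenv, 0)) != 0 && ret == 0)
		ret = t_ret;
	return (ret);
}

As in the tool, the handles are closed whether or not verification succeeded, and the first nonzero return value is the one propagated.
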
diff --git a/db/build_vxworks/db_verify/db_verify.wpj b/db/build_vxworks/db_verify/db_verify.wpj
new file mode 100644
index 000000000..a4d11642b
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify.wpj
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+db_verify.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/db_verify.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_db_verify.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_db_verify.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE_db_verify.c_dependencies
+
+<END>
+
+<BEGIN> FILE_db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> FILE_db_verify.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/db_verify.c
+<END>
+
+<BEGIN> userComments
+db_verify
+<END>
diff --git a/db/build_vxworks/db_verify/db_verify/Makefile.component b/db/build_vxworks/db_verify/db_verify/Makefile.component
new file mode 100644
index 000000000..2748647a6
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = db_verify
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_verify.o \
+ compConfig.o
+COMPONENT_OBJS = db_verify.o
+DEPENDENCY_FILES = db_verify.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_verify.o \
+ compConfig.o
+COMPONENT_OBJS = db_verify.o
+DEPENDENCY_FILES = db_verify.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_verify.o \
+ compConfig.o
+COMPONENT_OBJS = db_verify.o
+DEPENDENCY_FILES = db_verify.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = db_verify.o \
+ compConfig.o
+COMPONENT_OBJS = db_verify.o
+DEPENDENCY_FILES = db_verify.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+db_verify.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all db_verify modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > db_verify.c
+ $(COMPILE_TRADITIONAL) db_verify_ctdt.c -o db_verify_ctdt.o
+ $(LD) -r -o db_verify.tmp $@ db_verify_ctdt.o
+ $(RM) $@
+ $(MV) db_verify.tmp $@
+#
+# Adds entry point table section to db_verify component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all db_verify modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+db_verify.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+db_verify.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) db_verify.o: >> $@
+ $(NM) db_verify.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) db_verify.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/db_verify/db_verify/Makefile.custom b/db/build_vxworks/db_verify/db_verify/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/db_verify/db_verify/component.cdf b/db/build_vxworks/db_verify/db_verify/component.cdf
new file mode 100644
index 000000000..f29f8246b
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration.
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DB_VERIFY {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES db_verify.o
+ NAME db_verify
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module db_verify.o {
+
+ NAME db_verify.o
+ SRC_PATH_NAME $PRJ_DIR/../db_verify.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/db_verify/db_verify/component.wpj b/db/build_vxworks/db_verify/db_verify/component.wpj
new file mode 100644
index 000000000..2956dcbe0
--- /dev/null
+++ b/db/build_vxworks/db_verify/db_verify/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_objects
+db_verify.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../db_verify.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../db_verify.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/build_vxworks/demo/DBdemo.wpj b/db/build_vxworks/demo/DBdemo.wpj
new file mode 100755
index 000000000..c78c0a7c6
--- /dev/null
+++ b/db/build_vxworks/demo/DBdemo.wpj
@@ -0,0 +1,162 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+DBdemo.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/DBdemo.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I. \
+ -I$(PRJ_DIR)/.. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_DBdemo.out
+
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/dbdemo.c_dependDone
+TRUE
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/dbdemo.c_dependencies
+$(PRJ_DIR)/../db.h
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> FILE_$(PRJ_DIR)/dbdemo.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/dbdemo.c
+<END>
+
+<BEGIN> userComments
+DB Demo program
+<END>
+
diff --git a/db/build_vxworks/demo/README b/db/build_vxworks/demo/README
new file mode 100644
index 000000000..6ef111d35
--- /dev/null
+++ b/db/build_vxworks/demo/README
@@ -0,0 +1,30 @@
+
+This README describes the steps needed to build and run the
+BerkeleyDB demo program on VxWorks.
+
+1. Read the pages in the Reference Guide that describe building
+BerkeleyDB on VxWorks:
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/intro.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/notes.html
+ $(WIND_BASE)/target/src/BerkeleyDB/docs/ref/build_vxworks/faq.html
+
+2. Launch Tornado 2.0 and open up the BerkeleyDB project.
+3. Add the demo project to that workspace:
+ $(WIND_BASE)/target/src/BerkeleyDB/build_vxworks/demo/DBdemo.wpj
+4. Build BerkeleyDB as described in the Reference Guide.
+5. Build the DBdemo project.
+6. Download BerkeleyDB onto the target.
+7. Download the DBdemo project onto the target.
+8. Open a windsh to the target and run the demo:
+ -> dbdemo "<pathname>/<dbname>"
+where <pathname> is an existing directory in which the demo can
+create the database and <dbname> is the name of the database file.
+For example:
+ -> dbdemo "/tmp/demo.db"
+
+9. The demo program will ask for input. You can type in any string.
+The program will add an entry to the database with that string as the
+key and the reverse of that string as the data item for that key.
+It will continue asking for input until you hit ^D or enter "quit".
+Upon doing so, the demo program will display all the keys you have
+entered as input and their data items.
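For reference, the demo entry point can also be invoked directly from C code on the
target (for instance from an application init routine) rather than from windsh. The
sketch below is illustrative only: the wrapper name runDbDemo and the database path
are assumptions, and the target directory must already exist as noted above; only
dbdemo() itself comes from dbdemo.c, which follows.

    /*
     * Hypothetical caller for the demo entry point; equivalent to typing
     *     dbdemo "/tmp/demo.db"
     * into windsh.  The wrapper name and path are illustrative only.
     */
    extern int dbdemo(char *database);    /* declared in dbdemo.c below */

    int runDbDemo(void)
    {
        return (dbdemo("/tmp/demo.db"));
    }

On a real target the same call could also be handed to taskSpawn() to run the demo
as its own task, but a direct call keeps the sketch minimal.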
diff --git a/db/build_vxworks/demo/dbdemo.c b/db/build_vxworks/demo/dbdemo.c
new file mode 100644
index 000000000..e3da901c4
--- /dev/null
+++ b/db/build_vxworks/demo/dbdemo.c
@@ -0,0 +1,130 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: dbdemo.c,v 1.1 2001/09/11 15:51:16 sue Exp
+ */
+
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <db.h>
+
+#include "stdio.h"
+#define ERROR_RETURN ERROR
+
+int dbdemo __P((char *));
+
+int
+dbdemo(database)
+ char *database;
+{
+ DB *dbp;
+ DBC *dbcp;
+ DBT key, data;
+ u_int32_t len;
+ int ret;
+ char *p, *t, buf[1024], rbuf[1024];
+ const char *progname = "dbdemo"; /* Program name. */
+
+ /* Remove the previous database. */
+ (void)remove(database);
+
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, NULL, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, 1024)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if ((ret = dbp->set_cachesize(dbp, 0, 32 * 1024, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+ if ((ret =
+ dbp->open(dbp, database, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", database);
+ goto err1;
+ }
+
+ /*
+ * Insert records into the database, where the key is the user
+ * input and the data is the user input in reverse order.
+ */
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ for (;;) {
+ printf("input> ");
+ fflush(stdout);
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ if (strncmp(buf, "quit", 4) == 0)
+ break;
+ if ((len = strlen(buf)) <= 1)
+ continue;
+ for (t = rbuf, p = buf + (len - 2); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ key.data = buf;
+ data.data = rbuf;
+ data.size = key.size = len - 1;
+
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, DB_NOOVERWRITE)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err1;
+ break;
+ }
+ }
+ printf("\n");
+
+ /* Acquire a cursor for the database. */
+ if ((ret = dbp->cursor(dbp, NULL, &dbcp, 0)) != 0) {
+ dbp->err(dbp, ret, "DB->cursor");
+ goto err1;
+ }
+
+ /* Initialize the key/data pair so the flags aren't set. */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ /* Walk through the database and print out the key/data pairs. */
+ while ((ret = dbcp->c_get(dbcp, &key, &data, DB_NEXT)) == 0)
+ printf("%.*s : %.*s\n",
+ (int)key.size, (char *)key.data,
+ (int)data.size, (char *)data.data);
+ if (ret != DB_NOTFOUND) {
+ dbp->err(dbp, ret, "DBcursor->get");
+ goto err2;
+ }
+
+ /* Close everything down. */
+ if ((ret = dbcp->c_close(dbcp)) != 0) {
+ dbp->err(dbp, ret, "DBcursor->close");
+ goto err1;
+ }
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (ERROR_RETURN);
+ }
+ return (0);
+
+err2: (void)dbcp->c_close(dbcp);
+err1: (void)dbp->close(dbp, 0);
+ return (ERROR_RETURN);
+}
diff --git a/db/build_vxworks/demo/demo/Makefile.component b/db/build_vxworks/demo/demo/Makefile.component
new file mode 100644
index 000000000..ad7208a77
--- /dev/null
+++ b/db/build_vxworks/demo/demo/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = demo
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = dbdemo.o \
+ compConfig.o
+COMPONENT_OBJS = dbdemo.o
+DEPENDENCY_FILES = dbdemo.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = dbdemo.o \
+ compConfig.o
+COMPONENT_OBJS = dbdemo.o
+DEPENDENCY_FILES = dbdemo.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = dbdemo.o \
+ compConfig.o
+COMPONENT_OBJS = dbdemo.o
+DEPENDENCY_FILES = dbdemo.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = dbdemo.o \
+ compConfig.o
+COMPONENT_OBJS = dbdemo.o
+DEPENDENCY_FILES = dbdemo.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+demo.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all demo modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > demo_ctdt.c
+ $(COMPILE_TRADITIONAL) demo_ctdt.c -o demo_ctdt.o
+ $(LD) -r -o demo.tmp $@ demo_ctdt.o
+ $(RM) $@
+ $(MV) demo.tmp $@
+#
+# Adds entry point table section to demo component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all demo modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+demo.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+demo.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) dbdemo.o: >> $@
+ $(NM) dbdemo.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) dbdemo.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/build_vxworks/demo/demo/Makefile.custom b/db/build_vxworks/demo/demo/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/build_vxworks/demo/demo/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/build_vxworks/demo/demo/component.cdf b/db/build_vxworks/demo/demo/component.cdf
new file mode 100644
index 000000000..f927693ee
--- /dev/null
+++ b/db/build_vxworks/demo/demo/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE_DEMO {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES dbdemo.o
+ NAME demo
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module dbdemo.o {
+
+ NAME dbdemo.o
+ SRC_PATH_NAME $PRJ_DIR/../dbdemo.c
+}
+
+/* Parameter information */
+
diff --git a/db/build_vxworks/demo/demo/component.wpj b/db/build_vxworks/demo/demo/component.wpj
new file mode 100644
index 000000000..01c459909
--- /dev/null
+++ b/db/build_vxworks/demo/demo/component.wpj
@@ -0,0 +1,607 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_objects
+dbdemo.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../dbdemo.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../dbdemo.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/common/util_arg.c b/db/common/util_arg.c
new file mode 100644
index 000000000..f69c153d6
--- /dev/null
+++ b/db/common/util_arg.c
@@ -0,0 +1,127 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: util_arg.c,v 1.2 2001/10/04 21:11:48 bostic Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+static char *__db_strsep __P((char **, const char *));
+
+/*
+ * __db_util_arg --
+ * Convert a string into an argc/argv pair.
+ *
+ * PUBLIC: int __db_util_arg __P((char *, char *, int *, char ***));
+ */
+int
+__db_util_arg(arg0, str, argcp, argvp)
+ char *arg0, *str, ***argvp;
+ int *argcp;
+{
+ int n, ret;
+ char **ap, **argv;
+
+#define MAXARGS 25
+ if ((ret =
+ __os_malloc(NULL, (MAXARGS + 1) * sizeof(char **), &argv)) != 0)
+ return (ret);
+
+ ap = argv;
+ *ap++ = arg0;
+ for (n = 1; (*ap = __db_strsep(&str, " \t")) != NULL;)
+ if (**ap != '\0') {
+ ++ap;
+ if (++n == MAXARGS)
+ break;
+ }
+ *ap = NULL;
+
+ *argcp = ap - argv;
+ *argvp = argv;
+
+ return (0);
+}
+
+/*-
+ * Copyright (c) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Get next token from string *stringp, where tokens are possibly-empty
+ * strings separated by characters from delim.
+ *
+ * Writes NULs into the string at *stringp to end tokens.
+ * delim need not remain constant from call to call.
+ * On return, *stringp points past the last NUL written (if there might
+ * be further tokens), or is NULL (if there are definitely no more tokens).
+ *
+ * If *stringp is NULL, strsep returns NULL.
+ */
+static char *
+__db_strsep(stringp, delim)
+ char **stringp;
+ const char *delim;
+{
+ const char *spanp;
+ int c, sc;
+ char *s, *tok;
+
+ if ((s = *stringp) == NULL)
+ return (NULL);
+ for (tok = s;;) {
+ c = *s++;
+ spanp = delim;
+ do {
+ if ((sc = *spanp++) == c) {
+ if (c == 0)
+ s = NULL;
+ else
+ s[-1] = 0;
+ *stringp = s;
+ return (tok);
+ }
+ } while (sc != 0);
+ }
+ /* NOTREACHED */
+}
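In other words, __db_util_arg() turns a single command string (such as one typed into
windsh) back into the argc/argv pair that getopt-style option parsing expects, which is
presumably how the VxWorks wrappers for the command-line utilities take their arguments
as one string. A minimal caller sketch, under stated assumptions: the command string,
the printing loop, and the helper name parse_example are illustrative; only
__db_util_arg() comes from util_arg.c above.

    /*
     * Sketch of a caller.  __db_util_arg() splits "cmd" in place on spaces
     * and tabs, stores the resulting pointers (arg0 first) in a freshly
     * allocated vector, and returns 0 on success.
     */
    #include <stdio.h>

    int __db_util_arg(char *, char *, int *, char ***);

    void parse_example(void)
    {
        char cmd[] = "-q -h /tmp/dbhome access.db";    /* must be writable */
        char **argv;
        int argc, i, ret;

        if ((ret = __db_util_arg("db_verify", cmd, &argc, &argv)) != 0)
            return;
        for (i = 0; i < argc; ++i)
            printf("argv[%d] = %s\n", i, argv[i]);
        /*
         * The vector was allocated with __os_malloc(); release it with the
         * matching Berkeley DB free routine when finished (omitted here).
         */
    }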
diff --git a/db/cxx/cxx_db.cpp b/db/cxx/cxx_db.cpp
new file mode 100644
index 000000000..d573d48e4
--- /dev/null
+++ b/db/cxx/cxx_db.cpp
@@ -0,0 +1,822 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: cxx_db.cpp,v 11.49 2001/07/28 20:01:18 dda Exp ";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_auto.h"
+#include "crdel_auto.h"
+#include "db_ext.h"
+#include "common_ext.h"
+
+// A truism for the Db object is that there is a valid
+// DB handle from the constructor until close().
+// After the close, the DB handle is invalid and
+// no operations are permitted on the Db (other than
+// destructor). Leaving the Db handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow Db objects to be closed and reopened.
+// This implied always keeping a valid DB object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+Db::Db(DbEnv *env, u_int32_t flags)
+: imp_(0)
+, env_(env)
+, construct_error_(0)
+, flags_(0)
+, construct_flags_(flags)
+{
+ if (env_ == 0)
+ flags_ |= DB_CXX_PRIVATE_ENV;
+ initialize();
+}
+
+// Note: if the user has not closed, we call _destroy_check
+// to warn against this non-safe programming practice.
+// We can't close, because the environment may already
+// be closed/destroyed.
+//
+Db::~Db()
+{
+ DB *db;
+
+ db = unwrap(this);
+ if (db != NULL) {
+ DbEnv::_destroy_check("Db", 0);
+ cleanup();
+ }
+}
+
+// private method to initialize during constructor.
+// initialize must create a backing DB object,
+// and if that creates a new DB_ENV, it must be tied to a new DbEnv.
+// If there is an error, construct_error_ is set; this is examined
+// during open.
+//
+int Db::initialize()
+{
+ u_int32_t cxx_flags;
+ DB *db;
+ int err;
+ DB_ENV *cenv = unwrap(env_);
+
+ cxx_flags = construct_flags_ & DB_CXX_NO_EXCEPTIONS;
+
+ // Create a new underlying DB object.
+ // We rely on the fact that if a NULL DB_ENV* is given,
+ // one is allocated by DB.
+ //
+ if ((err = db_create(&db, cenv,
+ construct_flags_ & ~cxx_flags)) != 0) {
+ construct_error_ = err;
+ return (err);
+ }
+
+ // Associate the DB with this object
+ imp_ = wrap(db);
+ db->cj_internal = this;
+
+ // Create a new DbEnv from a DB_ENV* if it was created locally.
+ // It is deleted in Db::close().
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0)
+ env_ = new DbEnv(db->dbenv, cxx_flags);
+
+ return (0);
+}
+
+// private method to cleanup after destructor or during close.
+// If the environment was created by this Db object, we optionally
+// delete it, or return it so the caller can delete it after
+// last use.
+//
+void Db::cleanup()
+{
+ DB *db = unwrap(this);
+
+ if (db != NULL) {
+ // extra safety
+ db->cj_internal = 0;
+ imp_ = 0;
+
+ // we must dispose of the DbEnv object if
+ // we created it. This will be the case
+ // if a NULL DbEnv was passed into the constructor.
+ // The underlying DB_ENV object will be inaccessible
+ // after the close, so we must clean it up now.
+ //
+ if ((flags_ & DB_CXX_PRIVATE_ENV) != 0) {
+ env_->cleanup();
+ delete env_;
+ env_ = 0;
+ }
+ }
+ construct_error_ = 0;
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int Db::error_policy()
+{
+ if (env_ != NULL)
+ return (env_->error_policy());
+ else {
+ // If the env_ is null, that means that the user
+ // did not attach an environment, so the correct error
+ // policy can be deduced from constructor flags
+ // for this Db.
+ //
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+ }
+}
+
+int Db::close(u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ // after a DB->close (no matter if success or failure),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((err = db->close(db, flags)) != 0 && err != DB_INCOMPLETE)
+ DB_ERROR("Db::close", err, error_policy());
+
+ return (err);
+}
+
+int Db::cursor(DbTxn *txnid, Dbc **cursorp, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ DBC *dbc = 0;
+ int err;
+
+ if ((err = db->cursor(db, unwrap(txnid), &dbc, flags)) != 0) {
+ DB_ERROR("Db::cursor", err, error_policy());
+ return (err);
+ }
+
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)dbc;
+ return (0);
+}
+
+int Db::del(DbTxn *txnid, Dbt *key, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->del(db, unwrap(txnid), key, flags)) != 0) {
+ // DB_NOTFOUND is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_NOTFOUND) {
+ DB_ERROR("Db::del", err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+void Db::err(int error, const char *format, ...)
+{
+ va_list args;
+ DB *db = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(db->dbenv, error, 1, 1, format, args);
+ va_end(args);
+}
+
+void Db::errx(const char *format, ...)
+{
+ va_list args;
+ DB *db = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(db->dbenv, 0, 0, 1, format, args);
+ va_end(args);
+}
+
+int Db::fd(int *fdp)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->fd(db, fdp)) != 0) {
+ DB_ERROR("Db::fd", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::get(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->get(db, unwrap(txnid), key, value, flags)) != 0) {
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ const char *name = "Db::get";
+ if (err == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT(name, value, error_policy());
+ else
+ DB_ERROR(name, err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Db::get_byteswapped(int *isswapped)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_byteswapped(db, isswapped));
+}
+
+int Db::get_type(DBTYPE *dbtype)
+{
+ DB *db = (DB *)unwrapConst(this);
+ return (db->get_type(db, dbtype));
+}
+
+int Db::join(Dbc **curslist, Dbc **cursorp, u_int32_t flags)
+{
+ // Dbc is a "compatible" subclass of DBC -
+ // that is, no virtual functions or even extra data members,
+ // so this cast, although technically non-portable,
+ // "should" always be okay.
+ //
+ DBC **list = (DBC **)(curslist);
+ DB *db = unwrap(this);
+ DBC *dbc = 0;
+ int err;
+
+ if ((err = db->join(db, list, &dbc, flags)) != 0) {
+ DB_ERROR("Db::join_cursor", err, error_policy());
+ return (err);
+ }
+ *cursorp = (Dbc*)dbc;
+ return (0);
+}
+
+int Db::key_range(DbTxn *txnid, Dbt *key,
+ DB_KEY_RANGE *results, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->key_range(db, unwrap(txnid), key,
+ results, flags)) != 0) {
+ DB_ERROR("Db::key_range", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB->open method.
+//
+int Db::open(const char *file, const char *database,
+ DBTYPE type, u_int32_t flags, int mode)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if ((err = construct_error_) != 0)
+ DB_ERROR("Db::open", construct_error_, error_policy());
+ else if ((err = db->open(db, file, database, type, flags, mode)) != 0)
+ DB_ERROR("Db::open", err, error_policy());
+
+ return (err);
+}
+
+int Db::pget(DbTxn *txnid, Dbt *key, Dbt *pkey, Dbt *value, u_int32_t flags)
+{
+ DB *db = unwrap(this);
+ int err;
+
+ if ((err = db->pget(db, unwrap(txnid), key, pkey,
+ value, flags)) != 0) {
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ const char *name = "Db::pget";
+ if (err == ENOMEM && DB_OVERFLOWED_DBT(value))
+ DB_ERROR_DBT(name, value, error_policy());
+ else
+ DB_ERROR(name, err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Db::put(DbTxn *txnid, Dbt *key, Dbt *value, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if ((err = db->put(db, unwrap(txnid), key, value, flags)) != 0) {
+
+ // DB_KEYEXIST is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEXIST) {
+ DB_ERROR("Db::put", err, error_policy());
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Db::rename(const char *file, const char *database,
+ const char *newname, u_int32_t flags)
+{
+ int err = 0;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::rename", EINVAL, error_policy());
+ return (EINVAL);
+ }
+
+ // after a DB->rename (whether it succeeds or fails),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((err = db->rename(db, file, database, newname, flags)) != 0) {
+ DB_ERROR("Db::rename", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::remove(const char *file, const char *database, u_int32_t flags)
+{
+ int err = 0;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::remove", EINVAL, error_policy());
+ return (EINVAL);
+ }
+
+ // after a DB->remove (whether it succeeds or fails),
+ // the underlying DB object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((err = db->remove(db, file, database, flags)) != 0)
+ DB_ERROR("Db::remove", err, error_policy());
+
+ return (err);
+}
+
+int Db::truncate(DbTxn *txnid, u_int32_t *countp, u_int32_t flags)
+{
+ int err = 0;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::truncate", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->truncate(db, unwrap(txnid), countp, flags)) != 0) {
+ DB_ERROR("Db::truncate", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::stat(void *sp, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::stat", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->stat(db, sp, flags)) != 0) {
+ DB_ERROR("Db::stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int Db::sync(u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::sync", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->sync(db, flags)) != 0 && err != DB_INCOMPLETE) {
+ DB_ERROR("Db::sync", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int Db::upgrade(const char *name, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::upgrade", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = db->upgrade(db, name, flags)) != 0) {
+ DB_ERROR("Db::upgrade", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+////////////////////////////////////////////////////////////////////////
+//
+// callbacks
+//
+// *_intercept_c are 'glue' functions that must be declared
+// as extern "C" so as to be typesafe.  Using a C++ method, even
+// a static class method with 'correct' arguments, will not pass
+// the test; some picky compilers do not allow mixing of function
+// pointers to 'C' functions with function pointers to C++ functions.
+//
+// One wart with this scheme is that the *_callback_ method pointer
+// must be declared public to be accessible by the C intercept.
+// It's possible to accomplish the goal without this, and with
+// another public transfer method, but it's just too much overhead.
+// These callbacks are supposed to be *fast*.
+//
+// The DBTs we receive in these callbacks from the C layer may be
+// manufactured there, but we want to treat them as Dbts.
+// Technically speaking, these DBTs were not constructed as Dbts,
+// but it should be safe to cast them as such given that Dbt is a
+// *very* thin extension of the DBT. That is, Dbt has no additional
+// data elements, does not use virtual functions, virtual inheritance,
+// multiple inheritance, RTTI, or any other language feature that
+// causes the structure to grow or be displaced. Although this may
+// sound risky, a design goal of C++ is complete structure
+// compatibility with C, and the language's philosophy is 'if you
+// don't use it, you shouldn't incur the overhead'.  If the C/C++ compilers you're
+// using on a given machine do not have matching struct layouts, then
+// a lot more things will be broken than just this.
+//
+// The alternative, creating a Dbt here in the callback, and populating
+// it from the DBT, is just too slow and cumbersome to be very useful.
+
+/* associate callback */
+extern "C" int _db_associate_intercept_c(DB *secondary,
+ const DBT *key,
+ const DBT *data,
+ DBT *retval)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(secondary != NULL);
+ cxxthis = (Db *)secondary->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->associate_callback_ != 0);
+
+ return (*cxxthis->associate_callback_)(cxxthis,
+ Dbt::get_const_Dbt(key),
+ Dbt::get_const_Dbt(data),
+ Dbt::get_Dbt(retval));
+}
+
+int Db::associate(Db *secondary, int (*callback)(Db *, const Dbt *,
+ const Dbt *, Dbt *), u_int32_t flags)
+{
+ DB *cthis = unwrap(this);
+
+ /* Since the secondary Db is used as the first argument
+ * to the callback, we store the C++ callback on it
+ * rather than on 'this'.
+ */
+ secondary->associate_callback_ = callback;
+ return ((*(cthis->associate))
+ (cthis, unwrap(secondary), _db_associate_intercept_c, flags));
+}
+
+/* feedback callback */
+extern "C" void _db_feedback_intercept_c(DB *cthis, int opcode, int pct)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->feedback_callback_ != 0);
+
+ (*cxxthis->feedback_callback_)(cxxthis, opcode, pct);
+ return;
+}
+
+int Db::set_feedback(void (*arg)(Db *cxxthis, int opcode, int pct))
+{
+ DB *cthis = unwrap(this);
+ feedback_callback_ = arg;
+ return ((*(cthis->set_feedback))
+ (cthis, _db_feedback_intercept_c));
+}
+
+/* append_recno callback */
+extern "C" int _db_append_recno_intercept_c(DB *cthis, DBT *data,
+ db_recno_t recno)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->append_recno_callback_ != 0);
+
+ return (*cxxthis->append_recno_callback_)(cxxthis,
+ Dbt::get_Dbt(data),
+ recno);
+}
+
+int Db::set_append_recno(int (*arg)(Db *cxxthis, Dbt *data, db_recno_t recno))
+{
+ DB *cthis = unwrap(this);
+ append_recno_callback_ = arg;
+ return ((*(cthis->set_append_recno))
+ (cthis, _db_append_recno_intercept_c));
+}
+
+/* bt_compare callback */
+extern "C" int _db_bt_compare_intercept_c(DB *cthis, const DBT *data1,
+ const DBT *data2)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->bt_compare_callback_ != 0);
+
+ return (*cxxthis->bt_compare_callback_)(cxxthis,
+ Dbt::get_const_Dbt(data1),
+ Dbt::get_const_Dbt(data2));
+}
+
+int Db::set_bt_compare(int (*arg)(Db *cxxthis, const Dbt *data1,
+ const Dbt *data2))
+{
+ DB *cthis = unwrap(this);
+ bt_compare_callback_ = arg;
+ return ((*(cthis->set_bt_compare))
+ (cthis, _db_bt_compare_intercept_c));
+}
+
+/* bt_prefix callback */
+extern "C" size_t _db_bt_prefix_intercept_c(DB *cthis, const DBT *data1,
+ const DBT *data2)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->bt_prefix_callback_ != 0);
+
+ return (*cxxthis->bt_prefix_callback_)(cxxthis,
+ Dbt::get_const_Dbt(data1),
+ Dbt::get_const_Dbt(data2));
+}
+
+int Db::set_bt_prefix(size_t (*arg)(Db *cxxthis, const Dbt *data1,
+ const Dbt *data2))
+{
+ DB *cthis = unwrap(this);
+ bt_prefix_callback_ = arg;
+ return ((*(cthis->set_bt_prefix))
+ (cthis, _db_bt_prefix_intercept_c));
+}
+
+/* dup_compare callback */
+extern "C" int _db_dup_compare_intercept_c(DB *cthis, const DBT *data1,
+ const DBT *data2)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->dup_compare_callback_ != 0);
+
+ return (*cxxthis->dup_compare_callback_)(cxxthis,
+ Dbt::get_const_Dbt(data1),
+ Dbt::get_const_Dbt(data2));
+}
+
+int Db::set_dup_compare(int (*arg)(Db *cxxthis, const Dbt *data1,
+ const Dbt *data2))
+{
+ DB *cthis = unwrap(this);
+ dup_compare_callback_ = arg;
+ return ((*(cthis->set_dup_compare))
+ (cthis, _db_dup_compare_intercept_c));
+}
+
+/* h_hash callback */
+extern "C" u_int32_t _db_h_hash_intercept_c(DB *cthis, const void *data,
+ u_int32_t len)
+{
+ Db *cxxthis;
+
+ DB_ASSERT(cthis != NULL);
+ cxxthis = (Db *)cthis->cj_internal;
+ DB_ASSERT(cxxthis != NULL);
+ DB_ASSERT(cxxthis->h_hash_callback_ != 0);
+
+ return (*cxxthis->h_hash_callback_)(cxxthis, data, len);
+}
+
+int Db::set_h_hash(u_int32_t (*arg)(Db *cxxthis, const void *data,
+ u_int32_t len))
+{
+ DB *cthis = unwrap(this);
+ h_hash_callback_ = arg;
+ return ((*(cthis->set_h_hash))
+ (cthis, _db_h_hash_intercept_c));
+}
+
+// This is a 'glue' function declared as extern "C" so it will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+int _verify_callback_c(void *handle, const void *str_arg)
+{
+ char *str;
+ ostream *out;
+
+ str = (char *)str_arg;
+ out = (ostream *)handle;
+
+ (*out) << str;
+ if (out->fail())
+ return (EIO);
+
+ return (0);
+}
+
+int Db::verify(const char *name, const char *subdb,
+ ostream *ostr, u_int32_t flags)
+{
+ int err;
+ DB *db = unwrap(this);
+
+ if (!db) {
+ DB_ERROR("Db::verify", EINVAL, error_policy());
+ return (EINVAL);
+ }
+ if ((err = __db_verify_internal(db, name, subdb, ostr,
+ _verify_callback_c, flags)) != 0) {
+ DB_ERROR("Db::verify", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+// This is a variant of the DB_WO_ACCESS macro to define a simple set_
+// method calling the underlying C method, but unlike a simple
+// set method, it may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g. "char *arg") defined in terms of "arg".
+//
+#define DB_DB_ACCESS(_name, _argspec) \
+\
+int Db::set_##_name(_argspec) \
+{ \
+ int ret; \
+ DB *db = unwrap(this); \
+ \
+ if ((ret = (*(db->set_##_name))(db, arg)) != 0) { \
+ DB_ERROR("Db::set_" # _name, ret, error_policy()); \
+ } \
+ return (ret); \
+}
+
+#define DB_DB_ACCESS_NORET(_name, _argspec) \
+ \
+void Db::set_##_name(_argspec) \
+{ \
+ DB *db = unwrap(this); \
+ \
+ (*(db->set_##_name))(db, arg); \
+ return; \
+}
+
+DB_DB_ACCESS(bt_compare, bt_compare_fcn_type arg)
+DB_DB_ACCESS(bt_maxkey, u_int32_t arg)
+DB_DB_ACCESS(bt_minkey, u_int32_t arg)
+DB_DB_ACCESS(bt_prefix, bt_prefix_fcn_type arg)
+DB_DB_ACCESS(dup_compare, dup_compare_fcn_type arg)
+DB_DB_ACCESS_NORET(errfile, FILE *arg)
+DB_DB_ACCESS_NORET(errpfx, const char *arg)
+DB_DB_ACCESS(flags, u_int32_t arg)
+DB_DB_ACCESS(h_ffactor, u_int32_t arg)
+DB_DB_ACCESS(h_hash, h_hash_fcn_type arg)
+DB_DB_ACCESS(h_nelem, u_int32_t arg)
+DB_DB_ACCESS(lorder, int arg)
+DB_DB_ACCESS(pagesize, u_int32_t arg)
+DB_DB_ACCESS(re_delim, int arg)
+DB_DB_ACCESS(re_len, u_int32_t arg)
+DB_DB_ACCESS(re_pad, int arg)
+DB_DB_ACCESS(re_source, char *arg)
+DB_DB_ACCESS(q_extentsize, u_int32_t arg)
+
+// Here are the get/set methods that don't fit the above mold.
+//
+
+int Db::set_alloc(db_malloc_fcn_type malloc_fcn,
+ db_realloc_fcn_type realloc_fcn,
+ db_free_fcn_type free_fcn)
+{
+ DB *db;
+
+ db = unwrap(this);
+ return db->set_alloc(db, malloc_fcn, realloc_fcn, free_fcn);
+}
+
+void Db::set_errcall(void (*arg)(const char *, char *))
+{
+ env_->set_errcall(arg);
+}
+
+void *Db::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+void Db::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+int Db::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
+{
+ int ret;
+ DB *db = unwrap(this);
+
+ if ((ret = (*(db->set_cachesize))(db, gbytes, bytes, ncache)) != 0) {
+ DB_ERROR("Db::set_cachesize", ret, error_policy());
+ }
+ return (ret);
+}
+
+int Db::set_paniccall(void (*callback)(DbEnv *, int))
+{
+ return (env_->set_paniccall(callback));
+}
+
+void Db::set_error_stream(ostream *error_stream)
+{
+ env_->set_error_stream(error_stream);
+}
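
For orientation, a minimal illustrative sketch (not part of the patch) of how the Db methods defined above are typically driven from application code. The Db(DbEnv *, u_int32_t) constructor is assumed from db_cxx.h; the Dbt(void *, size_t) constructor appears in cxx_dbt.cpp below, and the open/put/get/close signatures match the methods in this file.

#include <db_cxx.h>

int example_db_usage()
{
	// Standalone database (no DbEnv).  DB_CXX_NO_EXCEPTIONS selects the
	// ON_ERROR_RETURN policy, so the checks below see plain error codes.
	// The Db(DbEnv *, u_int32_t) constructor is assumed from db_cxx.h.
	Db db(0, DB_CXX_NO_EXCEPTIONS);
	int err;

	if ((err = db.open("example.db", 0, DB_BTREE, DB_CREATE, 0644)) != 0)
		return (err);

	char keybuf[] = "fruit";
	char databuf[] = "apple";
	Dbt key(keybuf, sizeof(keybuf));
	Dbt data(databuf, sizeof(databuf));

	// Db::put above treats DB_KEYEXIST as a "normal" return in any case.
	if ((err = db.put(0, &key, &data, 0)) != 0 && err != DB_KEYEXIST)
		return (err);

	// Db::get above returns DB_NOTFOUND/DB_KEYEMPTY rather than throwing.
	Dbt result;
	err = db.get(0, &key, &result, 0);

	(void)db.close(0);
	return (err);
}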
diff --git a/db/cxx/cxx_dbc.cpp b/db/cxx/cxx_dbc.cpp
new file mode 100644
index 000000000..721bfa2fc
--- /dev/null
+++ b/db/cxx/cxx_dbc.cpp
@@ -0,0 +1,157 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: cxx_dbc.cpp,v 11.50 2001/09/29 15:48:05 dda Exp ";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_auto.h"
+#include "crdel_auto.h"
+#include "db_ext.h"
+#include "common_ext.h"
+
+// It's private, and should never be called, but VC4.0 needs it resolved
+//
+Dbc::~Dbc()
+{
+}
+
+int Dbc::close()
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_close(cursor)) != 0) {
+ DB_ERROR("Dbc::close", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
+
+int Dbc::count(db_recno_t *countp, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_count(cursor, countp, flags_arg)) != 0) {
+ DB_ERROR("Db::count", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
+
+int Dbc::del(u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_del(cursor, flags_arg)) != 0) {
+
+ // DB_KEYEMPTY is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEMPTY) {
+ DB_ERROR("Dbc::del", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Dbc::dup(Dbc** cursorp, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ DBC *new_cursor = 0;
+ int err;
+
+ if ((err = cursor->c_dup(cursor, &new_cursor, flags_arg)) != 0) {
+ DB_ERROR("Dbc::dup", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+
+ // The following cast implies that Dbc can be no larger than DBC
+ *cursorp = (Dbc*)new_cursor;
+ return (0);
+}
+
+int Dbc::get(Dbt* key, Dbt *data, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_get(cursor, key, data, flags_arg)) != 0) {
+
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ const char *name = "Dbc::get";
+ if (err == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT(name, key, ON_ERROR_UNKNOWN);
+ else if (err == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT(name, data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR(name, err, ON_ERROR_UNKNOWN);
+
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Dbc::pget(Dbt* key, Dbt *pkey, Dbt *data, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_pget(cursor, key, pkey, data, flags_arg)) != 0) {
+
+ // DB_NOTFOUND and DB_KEYEMPTY are "normal" returns,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND && err != DB_KEYEMPTY) {
+ const char *name = "Dbc::pget";
+ if (err == ENOMEM && DB_OVERFLOWED_DBT(key))
+ DB_ERROR_DBT(name, key, ON_ERROR_UNKNOWN);
+ else if (err == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT(name, data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR(name, err, ON_ERROR_UNKNOWN);
+
+ return (err);
+ }
+ }
+ return (err);
+}
+
+int Dbc::put(Dbt* key, Dbt *data, u_int32_t flags_arg)
+{
+ DBC *cursor = this;
+ int err;
+
+ if ((err = cursor->c_put(cursor, key, data, flags_arg)) != 0) {
+
+ // DB_KEYEXIST is a "normal" return, so should not be
+ // thrown as an error
+ //
+ if (err != DB_KEYEXIST) {
+ DB_ERROR("Dbc::put", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ }
+ return (err);
+}
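
An illustrative sketch (not part of the patch) of how these Dbc methods pair with Db::cursor from cxx_db.cpp above; it assumes an already opened Db handle and uses only calls whose signatures appear in this diff, plus the standard DB_NEXT flag from db.h.

#include <db_cxx.h>

int example_cursor_scan(Db *db)
{
	Dbc *dbc;
	Dbt key, data;
	int err;

	// Db::cursor (cxx_db.cpp above) hands back a Dbc* cast from a DBC*.
	if ((err = db->cursor(0, &dbc, 0)) != 0)
		return (err);

	// Dbc::get returns DB_NOTFOUND at the end of the scan rather
	// than throwing it (see above), so it terminates the loop.
	while ((err = dbc->get(&key, &data, DB_NEXT)) == 0) {
		// process key/data here
	}

	(void)dbc->close();
	return (err == DB_NOTFOUND ? 0 : err);
}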
diff --git a/db/cxx/cxx_dbt.cpp b/db/cxx/cxx_dbt.cpp
new file mode 100644
index 000000000..1c06f6f38
--- /dev/null
+++ b/db/cxx/cxx_dbt.cpp
@@ -0,0 +1,60 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: cxx_dbt.cpp,v 11.49 2001/07/28 20:01:18 dda Exp ";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_auto.h"
+#include "crdel_auto.h"
+#include "db_ext.h"
+#include "common_ext.h"
+
+Dbt::Dbt()
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+}
+
+Dbt::Dbt(void *data_arg, size_t size_arg)
+{
+ DBT *dbt = this;
+ memset(dbt, 0, sizeof(DBT));
+ set_data(data_arg);
+ set_size(size_arg);
+}
+
+Dbt::~Dbt()
+{
+}
+
+Dbt::Dbt(const Dbt &that)
+{
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+}
+
+Dbt &Dbt::operator = (const Dbt &that)
+{
+ if (this != &that) {
+ const DBT *from = &that;
+ DBT *to = this;
+ memcpy(to, from, sizeof(DBT));
+ }
+ return (*this);
+}
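
A small, hypothetical sketch (not part of the patch) of the usual way a Dbt is pointed at caller-owned memory so that the ENOMEM/DB_OVERFLOWED_DBT path checked in Db::get and Dbc::get above is avoided. The set_data/set_ulen/set_flags accessors and the DB_DBT_USERMEM flag are assumed from db_cxx.h and db.h; they are not defined in this file.

#include <db_cxx.h>

// Point 'dbt' at a caller-owned buffer of 'len' bytes.  If a retrieved
// record is larger than 'len', the get call fails with ENOMEM and the
// wrappers above report it via DB_ERROR_DBT (the DB_OVERFLOWED_DBT case).
void make_usermem_dbt(Dbt *dbt, void *buf, u_int32_t len)
{
	dbt->set_data(buf);
	dbt->set_ulen(len);
	dbt->set_flags(DB_DBT_USERMEM);
}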
diff --git a/db/cxx/cxx_env.cpp b/db/cxx/cxx_env.cpp
new file mode 100644
index 000000000..7c7d3ab82
--- /dev/null
+++ b/db/cxx/cxx_env.cpp
@@ -0,0 +1,1045 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: cxx_env.cpp,v 11.58 2001/10/04 21:13:59 bostic Exp ";
+#endif /* not lint */
+
+#include <errno.h>
+#include <stdio.h> // needed for set_error_stream
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "common_ext.h"
+
+// This datatype is needed for picky compilers.
+//
+extern "C" {
+ typedef void (*db_errcall_fcn_type)
+ (const char *, char *);
+};
+
+// The reason for a static variable is that some structures
+// (like Dbts) have no connection to any Db or DbEnv, so when
+// errors occur in their methods, we must have some reasonable
+// way to determine whether to throw or return errors.
+//
+// This variable is taken from flags whenever a DbEnv is constructed.
+// Normally there is only one DbEnv per program, and even if not,
+// there is typically a single policy of throwing or returning.
+//
+static int last_known_error_policy = ON_ERROR_UNKNOWN;
+
+ostream *DbEnv::error_stream_ = 0;
+
+// These 'glue' functions are declared as extern "C" so they will
+// be compatible with picky compilers that do not allow mixing
+// of function pointers to 'C' functions with function pointers
+// to C++ functions.
+//
+extern "C"
+void _feedback_intercept_c(DB_ENV *env, int opcode, int pct)
+{
+ DbEnv::_feedback_intercept(env, opcode, pct);
+}
+
+extern "C"
+void _paniccall_intercept_c(DB_ENV *env, int errval)
+{
+ DbEnv::_paniccall_intercept(env, errval);
+}
+
+extern "C"
+int _recovery_init_intercept_c(DB_ENV *env)
+{
+ return (DbEnv::_recovery_init_intercept(env));
+}
+
+extern "C"
+void _stream_error_function_c(const char *prefix, char *message)
+{
+ DbEnv::_stream_error_function(prefix, message);
+}
+
+extern "C"
+int _tx_recover_intercept_c(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ return (DbEnv::_tx_recover_intercept(env, dbt, lsn, op));
+}
+
+extern "C"
+int _rep_send_intercept_c(DB_ENV *env, void *cookie, const DBT *cntrl,
+ DBT *data, u_int32_t flags, int id)
+{
+ return (DbEnv::_rep_send_intercept(env,
+ cookie, cntrl, data, flags, id));
+}
+
+// _destroy_check is called when there is a user error in a
+// destructor, specifically when close has not been called for an
+// object (even if it was never opened). If the DbEnv is being
+// destroyed we cannot always use DbEnv::error_stream_, so we'll
+// use cerr in that case.
+//
+void DbEnv::_destroy_check(const char *str, int isDbEnv)
+{
+ ostream *out;
+
+ out = error_stream_;
+ if (out == NULL || isDbEnv == 1)
+ out = &cerr;
+
+ (*out) << "DbEnv::_destroy_check: open " << str << " object destroyed\n";
+}
+
+void DbEnv::_feedback_intercept(DB_ENV *env, int opcode, int pct)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return;
+ }
+ if (cxxenv->feedback_callback_ == 0) {
+ DB_ERROR("DbEnv::feedback_callback", EINVAL,
+ cxxenv->error_policy());
+ return;
+ }
+ (*cxxenv->feedback_callback_)(cxxenv, opcode, pct);
+}
+
+void DbEnv::_paniccall_intercept(DB_ENV *env, int errval)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->paniccall_callback_ == 0) {
+ DB_ERROR("DbEnv::paniccall_callback", EINVAL, cxxenv->error_policy());
+ }
+ (*cxxenv->paniccall_callback_)(cxxenv, errval);
+}
+
+int DbEnv::_recovery_init_intercept(DB_ENV *env)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ ON_ERROR_UNKNOWN);
+ }
+ if (cxxenv->recovery_init_callback_ == 0) {
+ DB_ERROR("DbEnv::recovery_init_callback", EINVAL,
+ cxxenv->error_policy());
+ }
+ return ((*cxxenv->recovery_init_callback_)(cxxenv));
+}
+
+int DbEnv::_tx_recover_intercept(DB_ENV *env, DBT *dbt,
+ DB_LSN *lsn, db_recops op)
+{
+ if (env == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ if (cxxenv->tx_recover_callback_ == 0) {
+ DB_ERROR("DbEnv::tx_recover_callback", EINVAL, cxxenv->error_policy());
+ return (EINVAL);
+ }
+ Dbt *cxxdbt = (Dbt *)dbt;
+ DbLsn *cxxlsn = (DbLsn *)lsn;
+ return ((*cxxenv->tx_recover_callback_)(cxxenv, cxxdbt, cxxlsn, op));
+}
+
+int DbEnv::_rep_send_intercept(DB_ENV *env, void *cookie, const DBT *cntrl,
+ DBT *data, u_int32_t flags, int id)
+{
+
+ if (env == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ DbEnv *cxxenv = (DbEnv *)env->cj_internal;
+ if (cxxenv == 0) {
+ DB_ERROR("DbEnv::rep_send_callback", EINVAL, ON_ERROR_UNKNOWN);
+ return (EINVAL);
+ }
+ const Dbt *cxxcntrl = (const Dbt *)cntrl;
+ Dbt *cxxdata = (Dbt *)data;
+ return ((*cxxenv->rep_send_callback_)(cxxenv,
+ cookie, cxxcntrl, cxxdata, flags, id));
+}
+
+// A truism for the DbEnv object is that there is a valid
+// DB_ENV handle from the constructor until close().
+// After the close, the DB_ENV handle is invalid and
+// no operations are permitted on the DbEnv (other than
+// destructor). Leaving the DbEnv handle open and not
+// doing a close is generally considered an error.
+//
+// We used to allow DbEnv objects to be closed and reopened.
+// This implied always keeping a valid DB_ENV object, and
+// coordinating the open objects between Db/DbEnv turned
+// out to be overly complicated. Now we do not allow this.
+
+DbEnv::DbEnv(u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, tx_recover_callback_(0)
+, paniccall_callback_(0)
+, rep_send_callback_(0)
+{
+ int err;
+
+ COMPQUIET(err, 0);
+ if ((err = initialize(0)) != 0)
+ DB_ERROR("DbEnv::DbEnv", err, error_policy());
+}
+
+DbEnv::DbEnv(DB_ENV *env, u_int32_t flags)
+: imp_(0)
+, construct_error_(0)
+, construct_flags_(flags)
+, tx_recover_callback_(0)
+, paniccall_callback_(0)
+, rep_send_callback_(0)
+{
+ int err;
+
+ COMPQUIET(err, 0);
+ if ((err = initialize(env)) != 0)
+ DB_ERROR("DbEnv::DbEnv", err, error_policy());
+}
+
+// Note: if the user has not closed, we call _destroy_check
+// to warn against this non-safe programming practice,
+// and call close anyway.
+//
+DbEnv::~DbEnv()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ _destroy_check("DbEnv", 1);
+ (void)env->close(env, 0);
+
+ // extra safety
+ cleanup();
+ }
+}
+
+// called by Db destructor when the DbEnv is owned by DB.
+void DbEnv::cleanup()
+{
+ DB_ENV *env = unwrap(this);
+
+ if (env != NULL) {
+ env->cj_internal = 0;
+ imp_ = 0;
+ }
+}
+
+int DbEnv::close(u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ // after a close (whether it succeeds or fails),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ // It's safe to throw an error after the close,
+ // since our error mechanism does not peer into
+ // the DB* structures.
+ //
+ if ((err = env->close(env, flags)) != 0) {
+ DB_ERROR("DbEnv::close", err, error_policy());
+ }
+ return (err);
+}
+
+void DbEnv::err(int error, const char *format, ...)
+{
+ va_list args;
+ DB_ENV *env = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(env, error, 1, 1, format, args);
+ va_end(args);
+}
+
+// Return a tristate value corresponding to whether we should
+// throw exceptions on errors:
+// ON_ERROR_RETURN
+// ON_ERROR_THROW
+// ON_ERROR_UNKNOWN
+//
+int DbEnv::error_policy()
+{
+ if ((construct_flags_ & DB_CXX_NO_EXCEPTIONS) != 0) {
+ return (ON_ERROR_RETURN);
+ }
+ else {
+ return (ON_ERROR_THROW);
+ }
+}
+
+void DbEnv::errx(const char *format, ...)
+{
+ va_list args;
+ DB_ENV *env = unwrap(this);
+
+ va_start(args, format);
+ __db_real_err(env, 0, 0, 1, format, args);
+ va_end(args);
+}
+
+void *DbEnv::get_app_private() const
+{
+ return unwrapConst(this)->app_private;
+}
+
+// used internally during the constructor
+// to associate an existing DB_ENV with this DbEnv,
+// or create a new one. If there is an error,
+// construct_error_ is set; this is examined during open.
+//
+int DbEnv::initialize(DB_ENV *env)
+{
+ int err;
+
+ last_known_error_policy = error_policy();
+
+ if (env == 0) {
+ // Create a new DB_ENV environment.
+ if ((err = ::db_env_create(&env,
+ construct_flags_ & ~DB_CXX_NO_EXCEPTIONS)) != 0) {
+ construct_error_ = err;
+ return (err);
+ }
+ }
+ imp_ = wrap(env);
+ env->cj_internal = this; // for DB_ENV* to DbEnv* conversion
+ return (0);
+}
+
+int DbEnv::lock_detect(u_int32_t flags, u_int32_t atype, int *aborted)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_detect(env, flags, atype, aborted)) != 0) {
+ DB_ERROR("DbEnv::lock_detect", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::lock_get(u_int32_t locker, u_int32_t flags, const Dbt *obj,
+ db_lockmode_t lock_mode, DbLock *lock)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_get(env, locker, flags, obj,
+ lock_mode, &lock->lock_)) != 0) {
+ DB_ERROR("DbEnv::lock_get", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::lock_id(u_int32_t *idp)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_id(env, idp)) != 0) {
+ DB_ERROR("DbEnv::lock_id", err, error_policy());
+ }
+ return (err);
+}
+
+int DbEnv::lock_id_free(u_int32_t id)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_id_free(env, id)) != 0) {
+ DB_ERROR("DbEnv::lock_id_free", err, error_policy());
+ }
+ return (err);
+}
+
+int DbEnv::lock_stat(DB_LOCK_STAT **statp, u_int32_t flags)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_stat(env, statp, flags)) != 0) {
+ DB_ERROR("DbEnv::lock_stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::lock_vec(u_int32_t locker, u_int32_t flags,
+ DB_LOCKREQ list[],
+ int nlist, DB_LOCKREQ **elist_returned)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = env->lock_vec(env, locker, flags, list,
+ nlist, elist_returned)) != 0) {
+ DB_ERROR("DbEnv::lock_vec", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::log_archive(char **list[], u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_archive(env, list, flags)) != 0) {
+ DB_ERROR("DbEnv::log_archive", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_compare(const DbLsn *lsn0, const DbLsn *lsn1)
+{
+ return (::log_compare(lsn0, lsn1));
+}
+
+int DbEnv::log_cursor(DbLogc **cursorp, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ DB_LOGC *dblogc = NULL;
+
+ if ((err = env->log_cursor(env, &dblogc, flags)) != 0) {
+ DB_ERROR("DbEnv::log_cursor", err, error_policy());
+ return (err);
+ }
+
+ // The following cast implies that DbLogc can be no larger than DB_LOGC
+ *cursorp = (DbLogc*)dblogc;
+ return (0);
+}
+
+int DbEnv::log_file(DbLsn *lsn, char *namep, size_t len)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_file(env, lsn, namep, len)) != 0) {
+ DB_ERROR("DbEnv::log_file", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_flush(const DbLsn *lsn)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_flush(env, lsn)) != 0) {
+ DB_ERROR("DbEnv::log_flush", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_put(DbLsn *lsn, const Dbt *data, u_int32_t flags)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_put(env, lsn, data, flags)) != 0) {
+ DB_ERROR("DbEnv::log_put", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_register(Db *dbp, const char *name)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_register(env, unwrap(dbp), name)) != 0) {
+ DB_ERROR("DbEnv::log_register", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_stat(DB_LOG_STAT **spp, u_int32_t flags)
+{
+ int err = 0;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_stat(env, spp, flags)) != 0) {
+ DB_ERROR("DbEnv::log_stat", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::log_unregister(Db *dbp)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+
+ if ((err = env->log_unregister(env, unwrap(dbp))) != 0) {
+ DB_ERROR("DbEnv::log_unregister", err, error_policy());
+ return (err);
+ }
+ return (0);
+}
+
+int DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags)
+{
+ DB_ENV *env;
+ int err;
+ DB_MPOOLFILE *mpf;
+
+ env = unwrap(this);
+ if (env == NULL) {
+ err = EINVAL;
+ }
+ else if ((err = env->memp_fcreate(env, &mpf, flags)) != 0) {
+ DB_ERROR("DbMpoolFile::f_create", err, ON_ERROR_UNKNOWN);
+ }
+ else {
+ *dbmfp = new DbMpoolFile();
+ (*dbmfp)->imp_ = wrap(mpf);
+ }
+ return (err);
+}
+
+int DbEnv::memp_register(int ftype,
+ pgin_fcn_type pgin_fcn,
+ pgout_fcn_type pgout_fcn)
+{
+ DB_ENV *env;
+ int err;
+
+ env = unwrap(this);
+ if ((err = env->memp_register(env, ftype, pgin_fcn, pgout_fcn)) != 0) {
+ DB_ERROR("DbEnv::memp_register", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_stat(DB_MPOOL_STAT **gsp,
+ DB_MPOOL_FSTAT ***fsp, u_int32_t flags)
+{
+ DB_ENV *env;
+ int err;
+
+ env = unwrap(this);
+ if ((err = env->memp_stat(env, gsp, fsp, flags)) != 0) {
+ DB_ERROR("DbEnv::memp_stat", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_sync(DbLsn *sn)
+{
+ DB_ENV *env;
+ int err;
+
+ env = unwrap(this);
+ if ((err = env->memp_sync(env, sn)) != 0 && err != DB_INCOMPLETE) {
+ DB_ERROR("DbEnv::memp_sync", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::memp_trickle(int pct, int *nwrotep)
+{
+ DB_ENV *env;
+ int err;
+
+ env = unwrap(this);
+ if ((err = env->memp_trickle(env, pct, nwrotep)) != 0) {
+ DB_ERROR("DbEnv::memp_trickle", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+// If an error occurred during the constructor, report it now.
+// Otherwise, call the underlying DB_ENV->open method.
+//
+int DbEnv::open(const char *db_home, u_int32_t flags, int mode)
+{
+ DB_ENV *env = unwrap(this);
+ int err;
+
+ if ((err = construct_error_) != 0)
+ DB_ERROR("Db::open", err, error_policy());
+ else if ((err = env->open(env, db_home, flags, mode)) != 0)
+ DB_ERROR("DbEnv::open", err, error_policy());
+
+ return (err);
+}
+
+int DbEnv::remove(const char *db_home, u_int32_t flags)
+{
+ DB_ENV *env;
+ int ret;
+
+ env = unwrap(this);
+
+ // after a remove (whether it succeeds or fails),
+ // the underlying DB_ENV object must not be accessed,
+ // so we clean up in advance.
+ //
+ cleanup();
+
+ if ((ret = env->remove(env, db_home, flags)) != 0)
+ DB_ERROR("DbEnv::remove", ret, error_policy());
+
+ return (ret);
+}
+
+// Report an error associated with the DbEnv.
+// error_policy is one of:
+// ON_ERROR_THROW throw an error
+// ON_ERROR_RETURN do nothing here, the caller will return an error
+// ON_ERROR_UNKNOWN defer the policy to policy saved in DbEnv::DbEnv
+//
+void DbEnv::runtime_error(const char *caller, int error, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbException except(caller, error);
+ throw except;
+ }
+}
+
+// Like DbEnv::runtime_error, but issue a DbMemoryException
+// based on the fact that this Dbt is not large enough.
+void DbEnv::runtime_error_dbt(const char *caller, Dbt *dbt, int error_policy)
+{
+ if (error_policy == ON_ERROR_UNKNOWN)
+ error_policy = last_known_error_policy;
+ if (error_policy == ON_ERROR_THROW) {
+ // Creating and throwing the object in two separate
+ // statements seems to be necessary for HP compilers.
+ DbMemoryException except(caller, dbt);
+ throw except;
+ }
+}
+
+// static method
+char *DbEnv::strerror(int error)
+{
+ return (db_strerror(error));
+}
+
+void DbEnv::_stream_error_function(const char *prefix, char *message)
+{
+ // HP compilers need the extra casts; we don't know why.
+ if (error_stream_) {
+ if (prefix) {
+ (*error_stream_) << prefix << (const char *)": ";
+ }
+ if (message) {
+ (*error_stream_) << (const char *)message;
+ }
+ (*error_stream_) << (const char *)"\n";
+ }
+}
+
+// set methods
+
+// This is a variant of the DB_WO_ACCESS macro to define a simple set_
+// method calling the underlying C method, but unlike a simple
+// set method, it may return an error or raise an exception.
+// Note this macro expects that input _argspec is an argument
+// list element (e.g. "char *arg") defined in terms of "arg".
+//
+#define DB_DBENV_ACCESS(_name, _argspec) \
+ \
+int DbEnv::set_##_name(_argspec) \
+{ \
+ int ret; \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ if ((ret = (*(dbenv->set_##_name))(dbenv, arg)) != 0) {\
+ DB_ERROR("DbEnv::set_" # _name, ret, error_policy()); \
+ } \
+ return (ret); \
+}
+
+#define DB_DBENV_ACCESS_NORET(_name, _argspec) \
+ \
+void DbEnv::set_##_name(_argspec) \
+{ \
+ DB_ENV *dbenv = unwrap(this); \
+ \
+ (*(dbenv->set_##_name))(dbenv, arg); \
+ return; \
+}
+
+DB_DBENV_ACCESS_NORET(errfile, FILE *arg)
+DB_DBENV_ACCESS_NORET(errpfx, const char *arg)
+
+// We keep these alphabetical by field name,
+// for comparison with Java's list.
+//
+DB_DBENV_ACCESS(data_dir, const char *arg)
+DB_DBENV_ACCESS(lg_bsize, u_int32_t arg)
+DB_DBENV_ACCESS(lg_dir, const char *arg)
+DB_DBENV_ACCESS(lg_max, u_int32_t arg)
+DB_DBENV_ACCESS(lg_regionmax, u_int32_t arg)
+DB_DBENV_ACCESS(lk_detect, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_lockers, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_locks, u_int32_t arg)
+DB_DBENV_ACCESS(lk_max_objects, u_int32_t arg)
+DB_DBENV_ACCESS(mp_mmapsize, size_t arg)
+DB_DBENV_ACCESS(tmp_dir, const char *arg)
+DB_DBENV_ACCESS(tx_max, u_int32_t arg)
+
+// Here are the get/set methods that don't fit the above mold.
+//
+
+int DbEnv::set_alloc(db_malloc_fcn_type malloc_fcn,
+ db_realloc_fcn_type realloc_fcn,
+ db_free_fcn_type free_fcn)
+{
+ DB_ENV *dbenv;
+
+ dbenv = unwrap(this);
+ return dbenv->set_alloc(dbenv, malloc_fcn, realloc_fcn, free_fcn);
+}
+
+void DbEnv::set_app_private(void *value)
+{
+ unwrap(this)->app_private = value;
+}
+
+int DbEnv::set_cachesize(u_int32_t gbytes, u_int32_t bytes, int ncache)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret =
+ (*(dbenv->set_cachesize))(dbenv, gbytes, bytes, ncache)) != 0)
+ DB_ERROR("DbEnv::set_cachesize", ret, error_policy());
+
+ return (ret);
+}
+
+void DbEnv::set_errcall(void (*arg)(const char *, char *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ // XXX
+ // We are casting from a function ptr declared with C++
+ // linkage to one (same arg types) declared with C
+ // linkage. It's hard to imagine a pair of C/C++
+ // compilers from the same vendor for which this
+ // won't work.  Unfortunately, we can't use an
+ // intercept function like the others since the
+ // function does not have a (DbEnv*) as one of
+ // the args. If this causes trouble, we can pull
+ // the same trick we use in Java, namely stuffing
+ // a (DbEnv*) pointer into the prefix. We're
+ // avoiding this for the moment because it obfuscates.
+ //
+ (*(dbenv->set_errcall))(dbenv, (db_errcall_fcn_type)arg);
+}
+
+// Note: This actually behaves a bit like a static function,
+// since DB_ENV.db_errcall has no information about which
+// db_env triggered the call. A user that has multiple DB_ENVs
+// will simply not be able to have different streams for each one.
+//
+void DbEnv::set_error_stream(ostream *stream)
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ error_stream_ = stream;
+ dbenv->set_errcall(dbenv, (stream == 0) ? 0 :
+ _stream_error_function_c);
+}
+
+int DbEnv::set_feedback(void (*arg)(DbEnv *, int, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ feedback_callback_ = arg;
+
+ return ((*(dbenv->set_feedback))(dbenv, _feedback_intercept_c));
+}
+
+int DbEnv::set_flags(u_int32_t flags, int onoff)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (dbenv->set_flags)(dbenv, flags, onoff)) != 0)
+ DB_ERROR("DbEnv::set_flags", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_lk_conflicts(u_int8_t *lk_conflicts, int lk_max)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (*(dbenv->set_lk_conflicts))
+ (dbenv, lk_conflicts, lk_max)) != 0)
+ DB_ERROR("DbEnv::set_lk_conflicts", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_paniccall(void (*arg)(DbEnv *, int))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ paniccall_callback_ = arg;
+
+ return ((*(dbenv->set_paniccall))(dbenv, _paniccall_intercept_c));
+}
+
+int DbEnv::set_recovery_init(int (*arg)(DbEnv *))
+{
+ DB_ENV *dbenv = unwrap(this);
+
+ recovery_init_callback_ = arg;
+
+ return ((*(dbenv->set_recovery_init))(dbenv, _recovery_init_intercept_c));
+}
+
+int DbEnv::set_rpc_server(void *cl, char *host, long tsec, long ssec, u_int32_t flags)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_rpc_server(dbenv, cl, host, tsec, ssec, flags))
+ != 0)
+ DB_ERROR("DbEnv::set_rpc_server", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_shm_key(long shm_key)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_shm_key(dbenv, shm_key)) != 0)
+ DB_ERROR("DbEnv::set_shm_key", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_tas_spins(u_int32_t arg)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_tas_spins(dbenv, arg)) != 0)
+ DB_ERROR("DbEnv::set_tas_spins", ret, last_known_error_policy);
+
+ return (ret);
+}
+
+int DbEnv::set_tx_recover
+ (int (*arg)(DbEnv *, Dbt *, DbLsn *, db_recops))
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ tx_recover_callback_ = arg;
+ if ((ret =
+ (*(dbenv->set_tx_recover))(dbenv, _tx_recover_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_tx_recover", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_tx_timestamp(time_t *timestamp)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = dbenv->set_tx_timestamp(dbenv, timestamp)) != 0)
+ DB_ERROR("DbEnv::set_tx_timestamp", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_verbose(u_int32_t which, int onoff)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ if ((ret = (*(dbenv->set_verbose))(dbenv, which, onoff)) != 0)
+ DB_ERROR("DbEnv::set_verbose", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::txn_begin(DbTxn *pid, DbTxn **tid, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ DB_TXN *txn;
+
+ if ((err = env->txn_begin(env, unwrap(pid), &txn, flags)) != 0) {
+ DB_ERROR("DbEnv::txn_begin", err, error_policy());
+ return (err);
+ }
+ DbTxn *result = new DbTxn();
+ result->imp_ = wrap(txn);
+ *tid = result;
+ return (err);
+}
+
+int DbEnv::txn_checkpoint(u_int32_t kbyte, u_int32_t min, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = env->txn_checkpoint(env, kbyte, min, flags)) != 0 &&
+ err != DB_INCOMPLETE) {
+ DB_ERROR("DbEnv::txn_checkpoint", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::txn_recover(DB_PREPLIST *preplist, long count,
+ long *retp, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = env->txn_recover(env, preplist, count, retp, flags)) != 0) {
+ DB_ERROR("DbEnv::txn_recover", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::txn_stat(DB_TXN_STAT **statp, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = env->txn_stat(env, statp, flags)) != 0) {
+ DB_ERROR("DbEnv::txn_stat", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+int DbEnv::set_rep_transport(u_int32_t myid, void *cookie,
+ int (*f_send)(DbEnv *, void *, const Dbt *, Dbt *, u_int32_t, int))
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+
+ rep_send_callback_ = f_send;
+ if ((ret = dbenv->set_rep_transport(dbenv,
+ myid, cookie, _rep_send_intercept_c)) != 0)
+ DB_ERROR("DbEnv::set_rep_transport", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::rep_elect(int nsites,
+ int pri, u_int32_t wait, u_int32_t sleep, int *idp, int *selfp)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+ if ((ret = dbenv->rep_elect(dbenv,
+ nsites, pri, wait, sleep, idp, selfp)) != 0)
+ DB_ERROR("DbEnv::rep_elect", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::rep_process_message(Dbt *control, Dbt *rec, int *idp)
+{
+ int ret;
+ DB_ENV *dbenv = unwrap(this);
+ if ((ret = dbenv->rep_process_message(dbenv, control, rec, idp)) != 0 &&
+ ret != DB_REP_HOLDELECTION && ret != DB_REP_NEWSITE &&
+ ret != DB_REP_NEWMASTER && ret != DB_REP_OUTDATED)
+ DB_ERROR("DbEnv::rep_process_message", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::rep_start(Dbt *cookie, u_int32_t flags)
+{
+ int ret;
+ DBT *cxxcookie = (DBT *)cookie;
+ DB_ENV *dbenv = unwrap(this);
+ if ((ret = dbenv->rep_start(dbenv, cxxcookie, flags)) != 0)
+ DB_ERROR("DbEnv::rep_start", ret, error_policy());
+
+ return (ret);
+}
+
+int DbEnv::set_timeout(db_timeout_t timeout, u_int32_t flags)
+{
+ int err;
+ DB_ENV *env = unwrap(this);
+ if ((err = env->set_timeout(env, timeout, flags)) != 0) {
+ DB_ERROR("DbEnv::set_timeout", err, error_policy());
+ return (err);
+ }
+ return (err);
+}
+
+// static method
+char *DbEnv::version(int *major, int *minor, int *patch)
+{
+ return (db_version(major, minor, patch));
+}
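
An illustrative sketch (not part of the patch) of driving the DbEnv methods above with the ON_ERROR_RETURN policy. DB_CXX_NO_EXCEPTIONS, DB_CREATE and DB_INIT_MPOOL are assumed from the public headers; without DB_CXX_NO_EXCEPTIONS the same failures would instead be thrown as DbException via DbEnv::runtime_error above.

#include <db_cxx.h>

int example_env_open(const char *home)
{
	// DB_CXX_NO_EXCEPTIONS makes error_policy() return ON_ERROR_RETURN,
	// so every failure below is reported only through the return code.
	DbEnv env(DB_CXX_NO_EXCEPTIONS);
	int err;

	if ((err = env.open(home, DB_CREATE | DB_INIT_MPOOL, 0)) != 0) {
		// A failed open still requires a close to discard the handle.
		(void)env.close(0);
		return (err);
	}

	// ... open Db handles against &env here ...

	return (env.close(0));
}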
diff --git a/db/cxx/cxx_logc.cpp b/db/cxx/cxx_logc.cpp
new file mode 100644
index 000000000..f946b2523
--- /dev/null
+++ b/db/cxx/cxx_logc.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: cxx_logc.cpp,v 11.2 2001/10/02 01:33:37 bostic Exp ";
+#endif /* not lint */
+
+#include <errno.h>
+#include <string.h>
+
+#include "db_cxx.h"
+#include "cxx_int.h"
+
+#include "db_int.h"
+#include "db_page.h"
+#include "db_auto.h"
+#include "crdel_auto.h"
+#include "db_ext.h"
+#include "common_ext.h"
+
+// It's private, and should never be called,
+// but some compilers need it resolved
+//
+DbLogc::~DbLogc()
+{
+}
+
+int DbLogc::close(u_int32_t flags)
+{
+ DB_LOGC *cursor = this;
+ int err;
+
+ if ((err = cursor->close(cursor, flags)) != 0) {
+ DB_ERROR("DbLogc::close", err, ON_ERROR_UNKNOWN);
+ return (err);
+ }
+ return (0);
+}
+
+int DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t flags)
+{
+ DB_LOGC *cursor = this;
+ int err;
+
+ if ((err = cursor->get(cursor, lsn, data, flags)) != 0) {
+
+ // DB_NOTFOUND is a "normal" return,
+ // so should not be thrown as an error
+ //
+ if (err != DB_NOTFOUND) {
+ const char *name = "DbLogc::get";
+ if (err == ENOMEM && DB_OVERFLOWED_DBT(data))
+ DB_ERROR_DBT(name, data, ON_ERROR_UNKNOWN);
+ else
+ DB_ERROR(name, err, ON_ERROR_UNKNOWN);
+
+ return (err);
+ }
+ }
+ return (err);
+}
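
An illustrative sketch (not part of the patch) pairing DbEnv::log_cursor from cxx_env.cpp above with the DbLogc methods in this file. DbLsn and the DB_NEXT flag are assumed from the public headers.

#include <db_cxx.h>

int example_log_walk(DbEnv *env)
{
	DbLogc *logc;
	DbLsn lsn;
	Dbt data;
	int err;

	if ((err = env->log_cursor(&logc, 0)) != 0)
		return (err);

	// DbLogc::get returns DB_NOTFOUND at the end of the log rather
	// than throwing it (see above), so it terminates the loop.
	while ((err = logc->get(&lsn, &data, DB_NEXT)) == 0) {
		// examine the record in 'data' here
	}

	(void)logc->close(0);
	return (err == DB_NOTFOUND ? 0 : err);
}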
diff --git a/db/dist/aclocal_java/ac_check_class.ac b/db/dist/aclocal_java/ac_check_class.ac
new file mode 100644
index 000000000..51c4cd5c3
--- /dev/null
+++ b/db/dist/aclocal_java/ac_check_class.ac
@@ -0,0 +1,107 @@
+dnl @synopsis AC_CHECK_CLASS
+dnl
+dnl AC_CHECK_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_check_class.ac,v 1.1 2001/08/23 16:58:42 dda Exp
+dnl
+AC_DEFUN([AC_CHECK_CLASS],[
+AC_REQUIRE([AC_PROG_JAVA])
+ac_var_name=`echo $1 | sed 's/\./_/g'`
+dnl Normally I'd use an AC_CACHE_CHECK here, but since the variable name is
+dnl dynamic I need an extra level of extraction
+AC_MSG_CHECKING([for $1 class])
+AC_CACHE_VAL(ac_cv_class_$ac_var_name, [
+if test x$ac_cv_prog_uudecode_base64 = xyes; then
+dnl /**
+dnl * Test.java: used to test dynamically if a class exists.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl Class lib;
+dnl if (argv.length < 1)
+dnl {
+dnl System.err.println ("Missing argument");
+dnl System.exit (77);
+dnl }
+dnl try
+dnl {
+dnl lib = Class.forName (argv[0]);
+dnl }
+dnl catch (ClassNotFoundException e)
+dnl {
+dnl System.exit (1);
+dnl }
+dnl lib = null;
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AKQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAANlcnIBABVMamF2YS9pby9QcmludFN0cmVhbTsJ
+AA0ACQcADgEAEGphdmEvbGFuZy9TeXN0ZW0IABABABBNaXNzaW5nIGFyZ3Vt
+ZW50DAASABMBAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWCgAV
+ABEHABYBABNqYXZhL2lvL1ByaW50U3RyZWFtDAAYABkBAARleGl0AQAEKEkp
+VgoADQAXDAAcAB0BAAdmb3JOYW1lAQAlKExqYXZhL2xhbmcvU3RyaW5nOylM
+amF2YS9sYW5nL0NsYXNzOwoAHwAbBwAgAQAPamF2YS9sYW5nL0NsYXNzBwAi
+AQAgamF2YS9sYW5nL0NsYXNzTm90Rm91bmRFeGNlcHRpb24BAAY8aW5pdD4B
+AAMoKVYMACMAJAoAAwAlAQAKU291cmNlRmlsZQEACVRlc3QuamF2YQAhAAEA
+AwAAAAAAAgAJAAUABgABAAcAAABtAAMAAwAAACkqvgSiABCyAAwSD7YAFBBN
+uAAaKgMyuAAeTKcACE0EuAAaAUwDuAAasQABABMAGgAdACEAAQAIAAAAKgAK
+AAAACgAAAAsABgANAA4ADgATABAAEwASAB4AFgAiABgAJAAZACgAGgABACMA
+JAABAAcAAAAhAAEAAQAAAAUqtwAmsQAAAAEACAAAAAoAAgAAAAQABAAEAAEA
+JwAAAAIAKA==
+====
+EOF
+ if uudecode$EXEEXT Test.uue; then
+ :
+ else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+ fi
+ rm -f Test.uue
+ if AC_TRY_COMMAND($JAVA $JAVAFLAGS Test $1) >/dev/null 2>&1; then
+ eval "ac_cv_class_$ac_var_name=yes"
+ else
+ eval "ac_cv_class_$ac_var_name=no"
+ fi
+ rm -f Test.class
+else
+ AC_TRY_COMPILE_JAVA([$1], , [eval "ac_cv_class_$ac_var_name=yes"],
+ [eval "ac_cv_class_$ac_var_name=no"])
+fi
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+eval "HAVE_$ac_var_name=$`echo ac_cv_class_$ac_var_val`"
+HAVE_LAST_CLASS=$ac_var_val
+if test x$ac_var_val = xyes; then
+ ifelse([$2], , :, [$2])
+else
+ ifelse([$3], , :, [$3])
+fi
+])
+dnl for some reason the above statement didn't fall through here?
+dnl do scripts have variable scoping?
+eval "ac_var_val=$`eval echo ac_cv_class_$ac_var_name`"
+AC_MSG_RESULT($ac_var_val)
+])
diff --git a/db/dist/aclocal_java/ac_check_classpath.ac b/db/dist/aclocal_java/ac_check_classpath.ac
new file mode 100644
index 000000000..5c4f60c18
--- /dev/null
+++ b/db/dist/aclocal_java/ac_check_classpath.ac
@@ -0,0 +1,23 @@
+dnl @synopsis AC_CHECK_CLASSPATH
+dnl
+dnl AC_CHECK_CLASSPATH just displays the CLASSPATH, for the edification
+dnl of the user.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_check_classpath.ac,v 1.1 2001/08/23 16:58:42 dda Exp
+dnl
+AC_DEFUN([AC_CHECK_CLASSPATH],[
+if test "x$CLASSPATH" = x; then
+ echo "You have no CLASSPATH, I hope it is good"
+else
+ echo "You have CLASSPATH $CLASSPATH, hope it is correct"
+fi
+])
diff --git a/db/dist/aclocal_java/ac_check_junit.ac b/db/dist/aclocal_java/ac_check_junit.ac
new file mode 100644
index 000000000..d004f6ff2
--- /dev/null
+++ b/db/dist/aclocal_java/ac_check_junit.ac
@@ -0,0 +1,54 @@
+dnl @synopsis AC_CHECK_JUNIT
+dnl
+dnl AC_CHECK_JUNIT tests the availability of the Junit testing
+dnl framework, and sets some variables for conditional compilation
+dnl of the test suite by automake.
+dnl
+dnl If available, JUNIT is set to a command launching the text
+dnl based user interface of Junit, @JAVA_JUNIT@ is set to $JAVA_JUNIT
+dnl and @TESTS_JUNIT@ is set to $TESTS_JUNIT, otherwise they are set
+dnl to empty values.
+dnl
+dnl You can use these variables in your Makefile.am file like this:
+dnl
+dnl # Some of the following classes are built only if junit is available
+dnl JAVA_JUNIT = Class1Test.java Class2Test.java AllJunitTests.java
+dnl
+dnl noinst_JAVA = Example1.java Example2.java @JAVA_JUNIT@
+dnl
+dnl EXTRA_JAVA = $(JAVA_JUNIT)
+dnl
+dnl TESTS_JUNIT = AllJunitTests
+dnl
+dnl TESTS = StandaloneTest1 StandaloneTest2 @TESTS_JUNIT@
+dnl
+dnl EXTRA_TESTS = $(TESTS_JUNIT)
+dnl
+dnl AllJunitTests :
+dnl echo "#! /bin/sh" > $@
+dnl echo "exec @JUNIT@ my.package.name.AllJunitTests" >> $@
+dnl chmod +x $@
+dnl
+dnl @author Luc Maisonobe
+dnl @version Id: ac_check_junit.ac,v 1.1 2001/08/23 16:58:43 dda Exp
+dnl
+AC_DEFUN([AC_CHECK_JUNIT],[
+AC_CACHE_VAL(ac_cv_prog_JUNIT,[
+AC_CHECK_CLASS(junit.textui.TestRunner)
+if test x"`eval 'echo $ac_cv_class_junit_textui_TestRunner'`" != xno ; then
+ ac_cv_prog_JUNIT='$(CLASSPATH_ENV) $(JAVA) $(JAVAFLAGS) junit.textui.TestRunner'
+fi])
+AC_MSG_CHECKING([for junit])
+if test x"`eval 'echo $ac_cv_prog_JUNIT'`" != x ; then
+ JUNIT="$ac_cv_prog_JUNIT"
+ JAVA_JUNIT='$(JAVA_JUNIT)'
+ TESTS_JUNIT='$(TESTS_JUNIT)'
+else
+ JUNIT=
+ JAVA_JUNIT=
+ TESTS_JUNIT=
+fi
+AC_MSG_RESULT($JAVA_JUNIT)
+AC_SUBST(JUNIT)
+AC_SUBST(JAVA_JUNIT)
+AC_SUBST(TESTS_JUNIT)])
diff --git a/db/dist/aclocal_java/ac_check_rqrd_class.ac b/db/dist/aclocal_java/ac_check_rqrd_class.ac
new file mode 100644
index 000000000..579be366a
--- /dev/null
+++ b/db/dist/aclocal_java/ac_check_rqrd_class.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_CHECK_RQRD_CLASS
+dnl
+dnl AC_CHECK_RQRD_CLASS tests the existence of a given Java class, either in
+dnl a jar or in a '.class' file and fails if it doesn't exist.
+dnl Its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_check_rqrd_class.ac,v 1.1 2001/08/23 16:58:43 dda Exp
+dnl
+
+AC_DEFUN([AC_CHECK_RQRD_CLASS],[
+CLASS=`echo $1|sed 's/\./_/g'`
+AC_CHECK_CLASS($1)
+if test "$HAVE_LAST_CLASS" = "no"; then
+ AC_MSG_ERROR([Required class $1 missing, exiting.])
+fi
+])
diff --git a/db/dist/aclocal_java/ac_java_options.ac b/db/dist/aclocal_java/ac_java_options.ac
new file mode 100644
index 000000000..a86067bbf
--- /dev/null
+++ b/db/dist/aclocal_java/ac_java_options.ac
@@ -0,0 +1,32 @@
+dnl @synopsis AC_JAVA_OPTIONS
+dnl
+dnl AC_JAVA_OPTIONS adds configure command line options used for Java m4
+dnl macros. This Macro is optional.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set; some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version Id: ac_java_options.ac,v 1.1 2001/08/23 16:58:43 dda Exp
+dnl
+AC_DEFUN([AC_JAVA_OPTIONS],[
+AC_ARG_WITH(java-prefix,
+ [ --with-java-prefix=PFX prefix where Java runtime is installed (optional)])
+AC_ARG_WITH(javac-flags,
+ [ --with-javac-flags=FLAGS flags to pass to the Java compiler (optional)])
+AC_ARG_WITH(java-flags,
+ [ --with-java-flags=FLAGS flags to pass to the Java VM (optional)])
+JAVAPREFIX=$with_java_prefix
+JAVACFLAGS=$with_javac_flags
+JAVAFLAGS=$with_java_flags
+AC_SUBST(JAVAPREFIX)dnl
+AC_SUBST(JAVACFLAGS)dnl
+AC_SUBST(JAVAFLAGS)dnl
+AC_SUBST(JAVA)dnl
+AC_SUBST(JAVAC)dnl
+])
diff --git a/db/dist/aclocal_java/ac_jni_include_dirs.ac b/db/dist/aclocal_java/ac_jni_include_dirs.ac
new file mode 100644
index 000000000..66e777b47
--- /dev/null
+++ b/db/dist/aclocal_java/ac_jni_include_dirs.ac
@@ -0,0 +1,97 @@
+dnl @synopsis AC_JNI_INCLUDE_DIR
+dnl
+dnl AC_JNI_INCLUDE_DIR finds include directories needed
+dnl for compiling programs using the JNI interface.
+dnl
+dnl JNI include directories are usually in the Java distribution.
+dnl This is deduced from the value of JAVAC. When this macro
+dnl completes, a list of directories is left in the variable
+dnl JNI_INCLUDE_DIRS.
+dnl
+dnl Example usage follows:
+dnl
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
+dnl do
+dnl CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
+dnl done
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_JNI_INCLUDE_DIR
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl Note: This macro can work with the autoconf M4 macros for Java programs.
+dnl This particular macro is not part of the original set of macros.
+dnl
+dnl @author Don Anderson <dda@sleepycat.com>
+dnl @version Id: ac_jni_include_dirs.ac,v 1.5 2001/09/17 18:48:28 bostic Exp
+dnl
+AC_DEFUN(AC_JNI_INCLUDE_DIR,[
+
+JNI_INCLUDE_DIRS=""
+
+test "x$JAVAC" = x && AC_MSG_ERROR(['$JAVAC' undefined])
+AC_PATH_PROG(_ACJNI_JAVAC, $JAVAC, no)
+test "x$_ACJNI_JAVAC" = xno && AC_MSG_ERROR([$JAVAC could not be found in path])
+
+_ACJNI_FOLLOW_SYMLINKS("$_ACJNI_JAVAC")
+_JTOPDIR=`echo "$_ACJNI_FOLLOWED" | sed -e 's://*:/:g' -e 's:/[[^/]]*$::'`
+case "$host_os" in
+ darwin*) _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ _JINC="$_JTOPDIR/Headers";;
+ *) _JINC="$_JTOPDIR/include";;
+esac
+if test -f "$_JINC/jni.h"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JINC"
+else
+ _JTOPDIR=`echo "$_JTOPDIR" | sed -e 's:/[[^/]]*$::'`
+ if test -f "$_JTOPDIR/include/jni.h"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include"
+ else
+ AC_MSG_ERROR([cannot find java include files])
+ fi
+fi
+
+# get the likely subdirectories for system specific java includes
+case "$host_os" in
+aix*) _JNI_INC_SUBDIRS="aix";;
+bsdi*) _JNI_INC_SUBDIRS="bsdos";;
+linux*) _JNI_INC_SUBDIRS="linux genunix";;
+osf*) _JNI_INC_SUBDIRS="alpha";;
+solaris*) _JNI_INC_SUBDIRS="solaris";;
+*) _JNI_INC_SUBDIRS="genunix";;
+esac
+
+# add any subdirectories that are present
+for JINCSUBDIR in $_JNI_INC_SUBDIRS
+do
+ if test -d "$_JTOPDIR/include/$JINCSUBDIR"; then
+ JNI_INCLUDE_DIRS="$JNI_INCLUDE_DIRS $_JTOPDIR/include/$JINCSUBDIR"
+ fi
+done
+])
+
+# _ACJNI_FOLLOW_SYMLINKS <path>
+# Follows symbolic links on <path>,
+# finally setting variable _ACJNI_FOLLOWED
+# --------------------
+AC_DEFUN(_ACJNI_FOLLOW_SYMLINKS,[
+# find the include directory relative to the javac executable
+_cur="$1"
+while ls -ld "$_cur" 2>/dev/null | grep " -> " >/dev/null; do
+ AC_MSG_CHECKING(symlink for $_cur)
+ _slink=`ls -ld "$_cur" | sed 's/.* -> //'`
+ case "$_slink" in
+ /*) _cur="$_slink";;
+ # 'X' avoids triggering unwanted echo options.
+ *) _cur=`echo "X$_cur" | sed -e 's/^X//' -e 's:[[^/]]*$::'`"$_slink";;
+ esac
+ AC_MSG_RESULT($_cur)
+done
+_ACJNI_FOLLOWED="$_cur"
+])# _ACJNI
+
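
The usage loop is already sketched in the macro's own comments; the fragment below merely assembles the pieces into one hypothetical configure.in excerpt and is a sketch rather than something taken from this change. Because the macro switches on $host_os, a canonicalization macro has to have run first.

dnl Hypothetical configure.in fragment (sketch only):
dnl $host_os is examined by AC_JNI_INCLUDE_DIR:
AC_CANONICAL_HOST
dnl JAVAC is how the macro deduces the JDK layout:
AC_PROG_JAVAC
AC_JNI_INCLUDE_DIR
for JNI_INCLUDE_DIR in $JNI_INCLUDE_DIRS
do
        CPPFLAGS="$CPPFLAGS -I$JNI_INCLUDE_DIR"
done
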
diff --git a/db/dist/aclocal_java/ac_prog_jar.ac b/db/dist/aclocal_java/ac_prog_jar.ac
new file mode 100644
index 000000000..3bd789517
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_jar.ac
@@ -0,0 +1,36 @@
+dnl @synopsis AC_PROG_JAR
+dnl
+dnl AC_PROG_JAR tests for an existing jar program. It uses the environment
+dnl variable JAR then tests in sequence various common jar programs.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAR=yourcompiler before calling
+dnl AC_PROG_JAR
+dnl
+dnl - at the configure level, setenv JAR
+dnl
+dnl You can use the JAR variable in your Makefile.in, with @JAR@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version Id: ac_prog_jar.ac,v 1.1 2001/08/23 16:58:43 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAR],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar$EXEEXT)
+else
+ test "x$JAR" = x && AC_CHECK_PROGS(JAR, jar, $JAVAPREFIX)
+fi
+test "x$JAR" = x && AC_MSG_ERROR([no acceptable jar program found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
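
A minimal, purely illustrative use of this macro is to locate jar at configure time and hand the result to Makefile.in through the substituted @JAR@ variable, as the comments above describe:

dnl Hypothetical configure.in fragment:
AC_PROG_JAR
dnl Makefile.in can then contain a line such as:
dnl     JAR = @JAR@
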
diff --git a/db/dist/aclocal_java/ac_prog_java.ac b/db/dist/aclocal_java/ac_prog_java.ac
new file mode 100644
index 000000000..906e212e1
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_java.ac
@@ -0,0 +1,77 @@
+dnl @synopsis AC_PROG_JAVA
+dnl
+dnl Here is a summary of the main macros:
+dnl
+dnl AC_PROG_JAVAC: finds a Java compiler.
+dnl
+dnl AC_PROG_JAVA: finds a Java virtual machine.
+dnl
+dnl AC_CHECK_CLASS: finds if we have the given class (beware of CLASSPATH!).
+dnl
+dnl AC_CHECK_RQRD_CLASS: finds if we have the given class and stops otherwise.
+dnl
+dnl AC_TRY_COMPILE_JAVA: attempts to compile user-given source.
+dnl
+dnl AC_TRY_RUN_JAVA: attempts to compile and run user-given source.
+dnl
+dnl AC_JAVA_OPTIONS: adds Java configure options.
+dnl
+dnl AC_PROG_JAVA tests an existing Java virtual machine. It uses the
+dnl environment variable JAVA then tests in sequence various common Java
+dnl virtual machines. For political reasons, it starts with the free ones.
+dnl You *must* call [AC_PROG_JAVAC] beforehand.
+dnl
+dnl If you want to force a specific VM:
+dnl
+dnl - at the configure.in level, set JAVA=yourvm before calling AC_PROG_JAVA
+dnl (but after AC_INIT)
+dnl
+dnl - at the configure level, setenv JAVA
+dnl
+dnl You can use the JAVA variable in your Makefile.in, with @JAVA@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding virtual machines (rationale: most Java programs
+dnl cannot run with some VMs, such as kaffe).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl A Web page, with a link to the latest CVS snapshot is at
+dnl <http://www.internatif.org/bortzmeyer/autoconf-Java/>.
+dnl
+dnl This is a sample configure.in
+dnl Process this file with autoconf to produce a configure script.
+dnl
+dnl AC_INIT(UnTag.java)
+dnl
+dnl dnl Checks for programs.
+dnl AC_CHECK_CLASSPATH
+dnl AC_PROG_JAVAC
+dnl AC_PROG_JAVA
+dnl
+dnl dnl Checks for classes
+dnl AC_CHECK_RQRD_CLASS(org.xml.sax.Parser)
+dnl AC_CHECK_RQRD_CLASS(com.jclark.xml.sax.Driver)
+dnl
+dnl AC_OUTPUT(Makefile)
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_prog_java.ac,v 1.1 2001/08/23 16:58:43 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVA],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test x$JAVAPREFIX = x; then
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT)
+else
+ test x$JAVA = x && AC_CHECK_PROGS(JAVA, kaffe$EXEEXT java$EXEEXT, $JAVAPREFIX)
+fi
+test x$JAVA = x && AC_MSG_ERROR([no acceptable Java virtual machine found in \$PATH])
+AC_PROG_JAVA_WORKS
+AC_PROVIDE([$0])dnl
+])
diff --git a/db/dist/aclocal_java/ac_prog_java_works.ac b/db/dist/aclocal_java/ac_prog_java_works.ac
new file mode 100644
index 000000000..23357470d
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_java_works.ac
@@ -0,0 +1,97 @@
+dnl @synopsis AC_PROG_JAVA_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_prog_java_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVA_WORKS], [
+AC_CHECK_PROG(uudecode, uudecode$EXEEXT, yes)
+if test x$uudecode = xyes; then
+AC_CACHE_CHECK([if uudecode can decode base 64 file], ac_cv_prog_uudecode_base64, [
+dnl /**
+dnl * Test.java: used to test if java compiler works.
+dnl */
+dnl public class Test
+dnl {
+dnl
+dnl public static void
+dnl main( String[] argv )
+dnl {
+dnl System.exit (0);
+dnl }
+dnl
+dnl }
+cat << \EOF > Test.uue
+begin-base64 644 Test.class
+yv66vgADAC0AFQcAAgEABFRlc3QHAAQBABBqYXZhL2xhbmcvT2JqZWN0AQAE
+bWFpbgEAFihbTGphdmEvbGFuZy9TdHJpbmc7KVYBAARDb2RlAQAPTGluZU51
+bWJlclRhYmxlDAAKAAsBAARleGl0AQAEKEkpVgoADQAJBwAOAQAQamF2YS9s
+YW5nL1N5c3RlbQEABjxpbml0PgEAAygpVgwADwAQCgADABEBAApTb3VyY2VG
+aWxlAQAJVGVzdC5qYXZhACEAAQADAAAAAAACAAkABQAGAAEABwAAACEAAQAB
+AAAABQO4AAyxAAAAAQAIAAAACgACAAAACgAEAAsAAQAPABAAAQAHAAAAIQAB
+AAEAAAAFKrcAErEAAAABAAgAAAAKAAIAAAAEAAQABAABABMAAAACABQ=
+====
+EOF
+if uudecode$EXEEXT Test.uue; then
+ ac_cv_prog_uudecode_base64=yes
+else
+ echo "configure: __oline__: uudecode had trouble decoding base 64 file 'Test.uue'" >&AC_FD_CC
+ echo "configure: failed file was:" >&AC_FD_CC
+ cat Test.uue >&AC_FD_CC
+ ac_cv_prog_uudecode_base64=no
+fi
+rm -f Test.uue])
+fi
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ rm -f Test.class
+ AC_MSG_WARN([I have to compile Test.class from scratch])
+ if test x$ac_cv_prog_javac_works = xno; then
+ AC_MSG_ERROR([Cannot compile java source. $JAVAC does not work properly])
+ fi
+ if test x$ac_cv_prog_javac_works = x; then
+ AC_PROG_JAVAC
+ fi
+fi
+AC_CACHE_CHECK(if $JAVA works, ac_cv_prog_java_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+TEST=Test
+changequote(, )dnl
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+public static void main (String args[]) {
+ System.exit (0);
+} }
+EOF
+changequote([, ])dnl
+if test x$ac_cv_prog_uudecode_base64 != xyes; then
+ if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) && test -s $CLASS_TEST; then
+ :
+ else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?))
+ fi
+fi
+if AC_TRY_COMMAND($JAVA $JAVAFLAGS $TEST) >/dev/null 2>&1; then
+ ac_cv_prog_java_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR(The Java VM $JAVA failed (see config.log, check the CLASSPATH?))
+fi
+rm -fr $JAVA_TEST $CLASS_TEST Test.uue
+])
+AC_PROVIDE([$0])dnl
+]
+)
diff --git a/db/dist/aclocal_java/ac_prog_javac.ac b/db/dist/aclocal_java/ac_prog_javac.ac
new file mode 100644
index 000000000..98f3eece7
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_javac.ac
@@ -0,0 +1,43 @@
+dnl @synopsis AC_PROG_JAVAC
+dnl
+dnl AC_PROG_JAVAC tests an existing Java compiler. It uses the environment
+dnl variable JAVAC then tests in sequence various common Java compilers. For
+dnl political reasons, it starts with the free ones.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVAC=yourcompiler before calling
+dnl AC_PROG_JAVAC
+dnl
+dnl - at the configure level, setenv JAVAC
+dnl
+dnl You can use the JAVAC variable in your Makefile.in, with @JAVAC@.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl TODO: allow excluding compilers (rationale: most Java programs cannot be
+dnl compiled with some compilers, such as guavac).
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_prog_javac.ac,v 1.3 2001/08/23 17:08:22 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVAC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT)
+else
+ test "x$JAVAC" = x && AC_CHECK_PROGS(JAVAC, javac$EXEEXT "gcj$EXEEXT -C" guavac$EXEEXT jikes$EXEEXT, $JAVAPREFIX)
+fi
+test "x$JAVAC" = x && AC_MSG_ERROR([no acceptable Java compiler found in \$PATH])
+AC_PROG_JAVAC_WORKS
+AC_PROVIDE([$0])dnl
+])
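
The comments above describe two ways of pinning a particular compiler; the configure.in variant looks like the sketch below (the gcj choice is only an example, not a recommendation from this change). Because JAVAC is already set, the search is skipped, while AC_PROG_JAVAC_WORKS still verifies that the chosen compiler can build a class.

dnl Hypothetical configure.in fragment forcing a compiler (illustrative choice):
JAVAC="gcj -C"
AC_PROG_JAVAC

The same effect is available at configure time by setting JAVAC in the environment before running configure.
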
diff --git a/db/dist/aclocal_java/ac_prog_javac_works.ac b/db/dist/aclocal_java/ac_prog_javac_works.ac
new file mode 100644
index 000000000..c83465713
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_javac_works.ac
@@ -0,0 +1,35 @@
+dnl @synopsis AC_PROG_JAVAC_WORKS
+dnl
+dnl Internal use ONLY.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Stephane Bortzmeyer <bortzmeyer@pasteur.fr>
+dnl @version Id: ac_prog_javac_works.ac,v 1.1 2001/08/23 16:58:44 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVAC_WORKS],[
+AC_CACHE_CHECK([if $JAVAC works], ac_cv_prog_javac_works, [
+JAVA_TEST=Test.java
+CLASS_TEST=Test.class
+cat << \EOF > $JAVA_TEST
+/* [#]line __oline__ "configure" */
+public class Test {
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS $JAVA_TEST) >/dev/null 2>&1; then
+ ac_cv_prog_javac_works=yes
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat $JAVA_TEST >&AC_FD_CC
+ AC_MSG_ERROR([The Java compiler $JAVAC failed (see config.log, check the CLASSPATH?)])
+fi
+rm -f $JAVA_TEST $CLASS_TEST
+])
+AC_PROVIDE([$0])dnl
+])
diff --git a/db/dist/aclocal_java/ac_prog_javadoc.ac b/db/dist/aclocal_java/ac_prog_javadoc.ac
new file mode 100644
index 000000000..5ce42ade9
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_javadoc.ac
@@ -0,0 +1,37 @@
+dnl @synopsis AC_PROG_JAVADOC
+dnl
+dnl AC_PROG_JAVADOC tests for an existing javadoc generator. It uses the environment
+dnl variable JAVADOC then tests in sequence various common javadoc generators.
+dnl
+dnl If you want to force a specific compiler:
+dnl
+dnl - at the configure.in level, set JAVADOC=yourgenerator before calling
+dnl AC_PROG_JAVADOC
+dnl
+dnl - at the configure level, setenv JAVADOC
+dnl
+dnl You can use the JAVADOC variable in your Makefile.in, with @JAVADOC@.
+dnl
+dnl Note: This macro depends on the autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download that whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl
+dnl The general documentation of those macros, as well as the sample
+dnl configure.in, is included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Egon Willighagen <egonw@sci.kun.nl>
+dnl @version Id: ac_prog_javadoc.ac,v 1.1 2001/08/23 16:58:44 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVADOC],[
+AC_REQUIRE([AC_EXEEXT])dnl
+if test "x$JAVAPREFIX" = x; then
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc$EXEEXT)
+else
+ test "x$JAVADOC" = x && AC_CHECK_PROGS(JAVADOC, javadoc, $JAVAPREFIX)
+fi
+test "x$JAVADOC" = x && AC_MSG_ERROR([no acceptable javadoc generator found in \$PATH])
+AC_PROVIDE([$0])dnl
+])
+
diff --git a/db/dist/aclocal_java/ac_prog_javah.ac b/db/dist/aclocal_java/ac_prog_javah.ac
new file mode 100644
index 000000000..9be2c57bd
--- /dev/null
+++ b/db/dist/aclocal_java/ac_prog_javah.ac
@@ -0,0 +1,26 @@
+dnl @synopsis AC_PROG_JAVAH
+dnl
+dnl AC_PROG_JAVAH tests the availability of the javah header generator
+dnl and looks for the jni.h header file. If available, JAVAH is set to
+dnl the full path of javah and CPPFLAGS is updated accordingly.
+dnl
+dnl @author Luc Maisonobe
+dnl @version Id: ac_prog_javah.ac,v 1.1 2001/08/23 16:58:44 dda Exp
+dnl
+AC_DEFUN([AC_PROG_JAVAH],[
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_PATH_PROG(JAVAH,javah)
+if test x"`eval 'echo $ac_cv_path_JAVAH'`" != x ; then
+ AC_TRY_CPP([#include <jni.h>],,[
+ ac_save_CPPFLAGS="$CPPFLAGS"
+changequote(, )dnl
+ ac_dir=`echo $ac_cv_path_JAVAH | sed 's,\(.*\)/[^/]*/[^/]*$,\1/include,'`
+ ac_machdep=`echo $build_os | sed 's,[-0-9].*,,'`
+changequote([, ])dnl
+ CPPFLAGS="$ac_save_CPPFLAGS -I$ac_dir -I$ac_dir/$ac_machdep"
+ AC_TRY_CPP([#include <jni.h>],
+ ac_save_CPPFLAGS="$CPPFLAGS",
+ AC_MSG_WARN([unable to include <jni.h>]))
+ CPPFLAGS="$ac_save_CPPFLAGS"])
+fi])
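
As a hedged sketch of how this macro is typically consumed (none of it comes from this change): once AC_PROG_JAVAH has run, CPPFLAGS has been extended with the -I options needed to preprocess <jni.h> when the default search path does not already find it, and the javah path is also available to Makefile.in as @JAVAH@, since AC_PATH_PROG substitutes the variable.

dnl Hypothetical configure.in fragment:
dnl the JNI glue is C code, so a C compiler check usually precedes this:
AC_PROG_CC
AC_PROG_JAVAC
AC_PROG_JAVAH
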
diff --git a/db/dist/aclocal_java/ac_try_compile_java.ac b/db/dist/aclocal_java/ac_try_compile_java.ac
new file mode 100644
index 000000000..6d9031333
--- /dev/null
+++ b/db/dist/aclocal_java/ac_try_compile_java.ac
@@ -0,0 +1,39 @@
+dnl @synopsis AC_TRY_COMPILE_JAVA
+dnl
+dnl AC_TRY_COMPILE_JAVA attempts to compile user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version Id: ac_try_compile_java.ac,v 1.1 2001/08/23 16:58:44 dda Exp
+dnl
+AC_DEFUN([AC_TRY_COMPILE_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
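
The arguments are an optional import, a class body, and the actions to take when the compile succeeds or fails. The fragment below is an illustrative sketch of a feature test built on top of the macro; the class being probed and both actions are examples, not part of this change.

dnl Hypothetical configure.in fragment (the Hashtable probe is illustrative):
AC_PROG_JAVAC
AC_MSG_CHECKING([whether java.util.Hashtable is usable])
AC_TRY_COMPILE_JAVA([java.util.Hashtable],
        [Hashtable h = new Hashtable();],
        [AC_MSG_RESULT([yes])],
        [AC_MSG_RESULT([no])])
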
diff --git a/db/dist/aclocal_java/ac_try_run_javac.ac b/db/dist/aclocal_java/ac_try_run_javac.ac
new file mode 100644
index 000000000..de9bb37d6
--- /dev/null
+++ b/db/dist/aclocal_java/ac_try_run_javac.ac
@@ -0,0 +1,40 @@
+dnl @synopsis AC_TRY_RUN_JAVA
+dnl
+dnl AC_TRY_RUN_JAVA attempts to compile and run user-given source.
+dnl
+dnl *Warning*: its success or failure can depend on a proper setting of the
+dnl CLASSPATH env. variable.
+dnl
+dnl Note: This is part of the set of autoconf M4 macros for Java programs.
+dnl It is VERY IMPORTANT that you download the whole set, some
+dnl macros depend on others. Unfortunately, the autoconf archive does not
+dnl support the concept of a set of macros, so I had to break it up for
+dnl submission.
+dnl The general documentation, as well as the sample configure.in, is
+dnl included in the AC_PROG_JAVA macro.
+dnl
+dnl @author Devin Weaver <ktohg@tritarget.com>
+dnl @version Id: ac_try_run_javac.ac,v 1.1 2001/08/23 16:58:45 dda Exp
+dnl
+AC_DEFUN([AC_TRY_RUN_JAVA],[
+AC_REQUIRE([AC_PROG_JAVAC])dnl
+AC_REQUIRE([AC_PROG_JAVA])dnl
+cat << \EOF > Test.java
+/* [#]line __oline__ "configure" */
+ifelse([$1], , , [import $1;])
+public class Test {
+[$2]
+}
+EOF
+if AC_TRY_COMMAND($JAVAC $JAVACFLAGS Test.java) && test -s Test.class && ($JAVA $JAVAFLAGS Test; exit) 2>/dev/null
+then
+dnl Don't remove the temporary files here, so they can be examined.
+ ifelse([$3], , :, [$3])
+else
+ echo "configure: failed program was:" >&AC_FD_CC
+ cat Test.java >&AC_FD_CC
+ifelse([$4], , , [ rm -fr Test*
+ $4
+])dnl
+fi
+rm -fr Test*])
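
This variant also runs the compiled class, so the class body passed as the second argument must supply a main method that exits with status 0 on success. The sketch below is illustrative only; the shell variable name is hypothetical, and AC_PROG_JAVAC/AC_PROG_JAVA are assumed to have been called already.

dnl Hypothetical configure.in fragment (db_cv_java_runs is an illustrative name):
AC_TRY_RUN_JAVA([java.util.Properties],
        [public static void main(String args[]) {
                Properties p = System.getProperties();
                System.exit(0);
        }],
        [db_cv_java_runs=yes],
        [db_cv_java_runs=no])
if test "$db_cv_java_runs" = no; then
        AC_MSG_WARN([the configured Java VM could not run a trivial program])
fi
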
diff --git a/db/dist/buildrel b/db/dist/buildrel
new file mode 100644
index 000000000..e530b093f
--- /dev/null
+++ b/db/dist/buildrel
@@ -0,0 +1,84 @@
+#!/bin/sh -
+# Id: buildrel,v 1.30 2001/08/07 18:19:37 bostic Exp
+#
+# Build the distribution archives.
+#
+# A set of commands intended to be cut and pasted into a csh window.
+
+# Development tree, release home.
+setenv D /a/db
+
+# Update the release number.
+cd $D/dist
+vi RELEASE
+setenv VERSION \
+`sh -c '. RELEASE; echo $DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH'`
+echo "Version: $VERSION"
+
+# Make sure the source tree is up-to-date, generate new support files, and
+# commit anything that's changed.
+cd $D && cvs -q update
+cd $D/dist && sh s_all
+cd $D && cvs -q commit
+
+# Copy a development tree into a release tree.
+setenv R /a/db-$VERSION
+rm -rf $R && mkdir -p $R
+cd $D && tar cf - \
+`cvs -q status | sed -n -e "/Repository/s;.*/CVSROOT/db/;;" -e "s/,v//p"` | \
+(cd $R && tar xpf -)
+
+# Fix symbolic links and permissions.
+cd $R/dist && sh s_perm
+cd $R/dist && sh s_symlink
+
+# Build the documentation.
+cd $R/docs_src && make
+
+# Build a version.
+cd $R && rm -rf build_run && mkdir build_run
+cd $R/build_run && ~bostic/bin/dbconf && make >& mklog
+
+# Smoke test.
+./ex_access
+
+# Check the install
+make prefix=`pwd`/BDB install
+
+# Clean up the tree.
+cd $R && rm -rf build_run docs_src java/src/com/sleepycat/test
+cd $R && rm -rf test/TODO test/upgrade
+cd $R && rm -rf test_cxx test_perf test_purify test_server test_thread
+cd $R && rm -rf test_vxworks
+
+# ACQUIRE ROOT PRIVILEGES
+cd $R && find . -type d | xargs chmod 775
+cd $R && find . -type f | xargs chmod 444
+cd $R && chmod 664 build_win32/*.dsp
+cd $R/dist && sh s_perm
+chown -R 100.100 $R
+# DISCARD ROOT PRIVILEGES
+
+# Compare this release with the last one.
+set LR=3.1.X
+cd $R/.. && gzcat /a/releases/db-${LR}.tar.gz | tar xf -
+cd $R/../db-${LR} && find . | sort > /tmp/__OLD
+cd $R && find . | sort > /tmp/__NEW
+diff -c /tmp/__OLD /tmp/__NEW
+
+# Create the tar archive release.
+setenv T "$R/../db-$VERSION.tar.gz"
+cd $R/.. && tar cf - db-$VERSION | gzip --best > $T
+chmod 444 $T
+
+# Create the zip archive release.
+#
+# Remove symbolic links to tags files. They're large and we don't want to
+# store real symbolic links in the archive for portability reasons.
+# ACQUIRE ROOT PRIVILEGES
+cd $R && rm -f `find . -type l -name 'tags'`
+# DISCARD ROOT PRIVILEGES
+
+setenv T "$R/../db-$VERSION.zip"
+cd $R/.. && zip -r - db-$VERSION > $T
+chmod 444 $T
diff --git a/db/dist/s_test b/db/dist/s_test
new file mode 100755
index 000000000..978d0e466
--- /dev/null
+++ b/db/dist/s_test
@@ -0,0 +1,85 @@
+#!/bin/sh -
+# Id: s_test,v 1.21 2001/09/24 20:46:59 sandstro Exp
+#
+# Build the Tcl test files.
+
+msgshb="# DO NOT EDIT BELOW THIS LINE: automatically built by dist/s_test."
+
+t=/tmp/__t
+trap 'rm -f $t; exit 0' 0 1 2 3 13 15
+
+. RELEASE
+
+(echo "set tclsh_path @TCL_TCLSH@" && \
+ echo "set tcllib .libs/libdb_tcl-@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@SOSUFFIX@" && \
+ echo "set rpc_server localhost" && \
+ echo "set rpc_path ." && \
+ echo "set src_root @srcdir@/.." && \
+ echo "set test_path @srcdir@/../test" && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "" && \
+ echo "set KILL \"@db_cv_path_kill@\"" && \
+ echo "" && \
+ echo "$msgshb" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "set rpc_testdir \$rpc_path/TESTDIR" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test") > $t
+
+f=../test/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+(echo "set tclsh_path SET_YOUR_TCLSH_PATH" && \
+ echo "set test_path ../test" && \
+ echo "set src_root .." && \
+ echo "global testdir" && \
+ echo "set testdir ./TESTDIR" && \
+ echo "set tcllib ./Debug/libdb_tcl${DB_VERSION_MAJOR}${DB_VERSION_MINOR}d.dll" && \
+ echo "" && \
+ echo "set KILL ./dbkill.exe" && \
+ echo "" && \
+ echo "$msgshb" && \
+ echo "" && \
+ echo "global dict" && \
+ echo "global util_path" && \
+ echo "" && \
+ echo "global is_hp_test" && \
+ echo "global is_qnx_test" && \
+ echo "global is_windows_test") > $t
+
+f=../build_win32/include.tcl
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
+
+# Build the test directory TESTS file.
+(echo $msgshb;
+cat `egrep -l '^#[ ][ ]*TEST' ../test/*.tcl` |
+sed -e '/^#[ ][ ]*TEST/!{' \
+ -e 's/.*//' \
+ -e '}' |
+cat -s |
+sed -e '/TEST/{' \
+ -e 's/^#[ ][ ]*TEST//' \
+ -e 's/^ //' \
+ -e 'H' \
+ -e 'd' \
+ -e '}' \
+ -e 's/.*//' \
+ -e x \
+ -e 's/\n/__LINEBREAK__/g' |
+sort |
+sed -e 's/__LINEBREAK__/\
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\
+/' \
+ -e 's/__LINEBREAK__/\
+ /g') > $t
+
+f=../test/TESTS
+cmp $t $f > /dev/null 2>&1 ||
+ (echo "Building $f" && rm -f $f && cp $t $f && chmod 444 $f)
diff --git a/db/dist/vx_ae.in/Makefile.component b/db/dist/vx_ae.in/Makefile.component
new file mode 100644
index 000000000..0eb96ad93
--- /dev/null
+++ b/db/dist/vx_ae.in/Makefile.component
@@ -0,0 +1,433 @@
+# Component project makefile generated by the project manager
+#
+
+
+## core information
+
+
+CONFIGLETTE = compConfig.c
+TGT_DIR = $(WIND_BASE)/target
+EXTERNAL_BINARIES_DIR = $(PRJ_DIR)/extbin/$(BUILD_SPEC)
+COMPONENT_NAME = __DB_APPLICATION_NAME__
+.PHONY: defaultTarget
+defaultTarget: $(COMPONENT_NAME).cm
+
+#
+# default definition for CPU and TOOL (needed by defs.project)
+#
+
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = __DB_APPLICATION_NAME__.o \
+ compConfig.o
+COMPONENT_OBJS = __DB_APPLICATION_NAME__.o
+DEPENDENCY_FILES = __DB_APPLICATION_NAME__.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+CPU = PENTIUM2
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = __DB_APPLICATION_NAME__.o \
+ compConfig.o
+COMPONENT_OBJS = __DB_APPLICATION_NAME__.o
+DEPENDENCY_FILES = __DB_APPLICATION_NAME__.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+CPU = PENTIUM
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = __DB_APPLICATION_NAME__.o \
+ compConfig.o
+COMPONENT_OBJS = __DB_APPLICATION_NAME__.o
+DEPENDENCY_FILES = __DB_APPLICATION_NAME__.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+## CPU and TOOL info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+CPU = SIMSPARCSOLARIS
+TOOL = gnu
+SYS_OBJ_DIR = $(WIND_BASE)/target/lib/obj$(CPU)$(TOOL)vx
+USER_OBJ_DIR = $(WIND_BASE)/target/user/objs/obj$(CPU)$(TOOL)
+PRJ_OBJS = __DB_APPLICATION_NAME__.o \
+ compConfig.o
+COMPONENT_OBJS = __DB_APPLICATION_NAME__.o
+DEPENDENCY_FILES = __DB_APPLICATION_NAME__.d \
+ compConfig.d
+NUMBER_OF_COMPONENT_OBJS = $(words $(COMPONENT_OBJS))
+endif
+
+
+include $(TGT_DIR)/h/make/defs.project
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2
+CFLAGS_AS = -mcpu=pentiumpro -march=pentiumpro -ansi -O2 -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM2
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 1
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build-configuration info for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## macros
+
+AR = arpentium
+AS = ccpentium
+CC = ccpentium
+CFLAGS = -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM
+CFLAGS_AS = -mcpu=pentium -march=pentium -ansi -g -nostdlib -fno-builtin -fno-defer-pop -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=PENTIUM
+CPP = ccpentium -E -P
+CPPFILT = c++filtpentium --strip-underscores
+LD = ldpentium
+LDFLAGS = -X
+LDPARTIAL = ccpentium -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -X -r
+NM = nmpentium -g
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizepentium
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## macros
+
+AR = arsparc
+AS = ccsparc
+CC = ccsparc
+CFLAGS = -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS
+CFLAGS_AS = -ansi -g -fno-builtin -P -x assembler-with-cpp -Wall -I. -I$(WIND_BASE)/target/h -DCPU=SIMSPARCSOLARIS
+CPP = ccsparc -E -P
+CPPFILT = c++filtsparc --strip-underscores
+LD = ldsparc
+LDFLAGS = -T $(WIND_BASE)/target/config/solaris/linker-script
+LDPARTIAL = ccsparc -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ -nostdlib -r -Wl,-X
+LD_PARTIAL_FLAGS = -nostdlib -r
+NM = nmsparc
+OPTION_DEFINE_MACRO = -D
+OPTION_GENERATE_DEPENDENCY_FILE = -MD
+OPTION_INCLUDE_DIR = -I
+RELEASE = 0
+SIZE = sizesparc
+POST_BUILD_RULE =
+
+## end build-configuration info for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+# override make definitions only below this line
+
+
+# override make definitions only above this line
+
+include $(TGT_DIR)/h/make/rules.project
+
+
+#
+# Dummy target to force external make
+#
+
+FORCE_EXTERNAL_MAKE:
+
+
+
+#
+# Custom makefile
+#
+
+include $(PRJ_DIR)/Makefile.custom
+
+
+## build rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## build rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentiumpro -march=pentiumpro -ansi -DRW_MULTI_THREAD -D_REENTRANT -O2 -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM2 $< > $@
+
+
+## end build rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## build rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccpentium -E -P -M -mcpu=pentium -march=pentium -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -nostdlib -fno-builtin -fno-defer-pop -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=PENTIUM $< > $@
+
+
+## end build rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+%.o : $(PRJ_DIR)/%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+%.o : $(PRJ_DIR)/../%.c
+ $(CC) -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -MD -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS -c $<
+
+%.d : $(PRJ_DIR)/../%.c
+ ccsparc -E -P -M -ansi -DRW_MULTI_THREAD -D_REENTRANT -g -fno-builtin -Wall -I. -I$(WIND_BASE)/target/h -I$(PRJ_DIR)/../.. -I$(PRJ_DIR)/../../../include -I$(PRJ_DIR)/../../../include_auto -DCPU=SIMSPARCSOLARIS $< > $@
+
+
+## end build rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+#
+# Rule for configurable, binary component
+#
+# Downloadable component module build rules
+#
+__DB_APPLICATION_NAME__.cm: $(PRJ_OBJS) mxrDoc.nm depend
+#
+# Partially link all __DB_APPLICATION_NAME__ modules together
+# including the configlette modules
+#
+ $(LD) -r -o $@ $(PRJ_OBJS) $(CC_LIB)
+#
+# Generate _vxMain and _vxExit by munching
+#
+ $(NM) $@ | $(MUNCH) > __DB_APPLICATION_NAME__.c
+ $(COMPILE_TRADITIONAL) __DB_APPLICATION_NAME___ctdt.c -o __DB_APPLICATION_NAME___ctdt.o
+ $(LD) -r -o __DB_APPLICATION_NAME__.tmp $@ __DB_APPLICATION_NAME___ctdt.o
+ $(RM) $@
+ $(MV) __DB_APPLICATION_NAME__.tmp $@
+#
+# Adds entry point table section to __DB_APPLICATION_NAME__ component
+#
+ $(PD_EPT_DDE_ADD) $@ $(PRJ_DIR)/$(PRJ_FILE) INDEX $(BUILD_SPEC)
+
+
+#
+# Partial link build rules
+# Partially link all __DB_APPLICATION_NAME__ modules together
+# If no source files, then generates dummy.c
+#
+
+ifeq ($(COMPONENT_OBJS),)
+dummy.c:
+ $(CFG_GEN) $(PRJ_DIR)/$(PRJ_FILE) dummyCGen $(PRJ_TYPE)
+
+__DB_APPLICATION_NAME__.pl: dummy.o mxrDoc.nm depend
+ $(LD) -r -o $@ dummy.o
+else
+__DB_APPLICATION_NAME__.pl: $(COMPONENT_OBJS) mxrDoc.nm depend
+ $(LD) -r -o $@ $(COMPONENT_OBJS)
+endif
+
+#
+# nm file containing symbol information for all component objects
+#
+
+EXTERNAL_BINARIES_DIR_PATTERN_1 = $(subst \,\\,$(PRJ_DIR)/extbin/$(BUILD_SPEC)/)
+EXTERNAL_BINARIES_DIR_PATTERN = $(subst /,\/,$(EXTERNAL_BINARIES_DIR_PATTERN_1))
+
+mxrDoc.nm: mxrDoc.size $(COMPONENT_OBJS)
+ $(RM) $@
+# if there are no object files, make a blank mxr doc
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" >> $@
+else
+# make sure object file name is in mxrDoc.nm even if there
+# is only one object file
+ $(ECHO) __DB_APPLICATION_NAME__.o: >> $@
+ $(NM) __DB_APPLICATION_NAME__.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+#
+# size file containing size information for all component objects
+#
+mxrDoc.size: $(COMPONENT_OBJS) $(PRJ_DIR)/component.cdf
+ $(RM) $@
+ifeq ($(NUMBER_OF_COMPONENT_OBJS),0)
+ @$(ECHO) "" > $@
+else
+ $(SIZE) __DB_APPLICATION_NAME__.o | sed -e "s|$(EXTERNAL_BINARIES_DIR_PATTERN)||g" >> $@
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.debug)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUM2gnu.release'
+
+ifeq ($(BUILD_SPEC),PENTIUM2gnu.release)
+
+## end user-defined rules for build specification 'PENTIUM2gnu.release'
+
+endif
+
+
+## user-defined rules for build specification 'PENTIUMgnu.debug'
+
+ifeq ($(BUILD_SPEC),PENTIUMgnu.debug)
+
+## end user-defined rules for build specification 'PENTIUMgnu.debug'
+
+endif
+
+
+## user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+ifeq ($(BUILD_SPEC),SIMSPARCSOLARISgnu.debug)
+
+## end user-defined rules for build specification 'SIMSPARCSOLARISgnu.debug'
+
+endif
+
+
+#
+# Recursive clean
+#
+
+rclean : clean
+
diff --git a/db/dist/vx_ae.in/Makefile.custom b/db/dist/vx_ae.in/Makefile.custom
new file mode 100644
index 000000000..ca781f7b2
--- /dev/null
+++ b/db/dist/vx_ae.in/Makefile.custom
@@ -0,0 +1,51 @@
+#
+# Custom Makefile shell
+#
+# This file may be edited freely, since it will not be regenerated
+# by the project manager.
+#
+# Use this makefile to define rules to make external binaries
+# and deposit them in the $(EXTERNAL_BINARIES_DIR) directory.
+#
+# If you have specified external modules during your component
+# creation, you will find make rules already in place below.
+# You will likely have to edit these to suit your individual
+# build setup.
+#
+# You may wish to use the CPU, BUILD_SPEC or TOOL make variables in
+# your Makefile to support builds for different architectures. Use
+# the FORCE_EXTERNAL_MAKE phony target to ensure that your external
+# make always runs.
+#
+# The example below assumes that your custom makefile is in the
+# mySourceTree directory, and that the binary file it produces
+# is placed into the $(BUILD_SPEC) sub-directory.
+#
+# EXTERNAL_SOURCE_BASE = /folk/me/mySourceTree
+# EXTERNAL_MODULE = myLibrary.o
+# EXTERNAL_MAKE = make
+#
+# $(EXTERNAL_BINARIES_DIR)/$(EXTERNAL_MODULE) : FORCE_EXTERNAL_MAKE
+# $(EXTERNAL_MAKE) -C $(EXTERNAL_SOURCE_BASE) \
+# -f $(EXTERNAL_SOURCE_BASE)/Makefile \
+# CPU=$(CPU) BUILD_SPEC=$(BUILD_SPEC) $(@F)
+# $(CP) $(subst /,$(DIRCHAR),$(EXTERNAL_SOURCE_BASE)/$(BUILD_SPEC)/$(@F) $@)
+#
+# If you are not adding your external modules from the component wizard,
+# you will have to include them in your component yourself:
+#
+# From the GUI, you can do this with the Component's 'Add external module'
+# dialog.
+#
+# If you are using the command line, add the module(s) by editing the
+# MODULES line in component.cdf file, e.g.
+#
+# Component INCLUDE_MYCOMPONENT {
+#
+# MODULES foo.o goo.o \
+# myLibrary.o
+#
+
+
+# rules to build custom libraries
+
diff --git a/db/dist/vx_ae.in/component.cdf b/db/dist/vx_ae.in/component.cdf
new file mode 100644
index 000000000..91edaa878
--- /dev/null
+++ b/db/dist/vx_ae.in/component.cdf
@@ -0,0 +1,30 @@
+/* component.cdf - dynamically updated configuration */
+
+/*
+ * NOTE: you may edit this file to alter the configuration
+ * But all non-configuration information, including comments,
+ * will be lost upon rebuilding this project.
+ */
+
+/* Component information */
+
+Component INCLUDE___DB_CAPAPPL_NAME__ {
+ ENTRY_POINTS ALL_GLOBAL_SYMBOLS
+ MODULES __DB_APPLICATION_NAME__.o
+ NAME __DB_APPLICATION_NAME__
+ PREF_DOMAIN ANY
+ _INIT_ORDER usrComponentsInit
+}
+
+/* EntryPoint information */
+
+/* Module information */
+
+Module __DB_APPLICATION_NAME__.o {
+
+ NAME __DB_APPLICATION_NAME__.o
+ SRC_PATH_NAME $PRJ_DIR/../__DB_APPLICATION_NAME__.c
+}
+
+/* Parameter information */
+
diff --git a/db/dist/vx_ae.in/component.wpj b/db/dist/vx_ae.in/component.wpj
new file mode 100644
index 000000000..59c1acebc
--- /dev/null
+++ b/db/dist/vx_ae.in/component.wpj
@@ -0,0 +1,615 @@
+Document file - DO NOT EDIT
+
+<BEGIN> CORE_INFO_TYPE
+::prj_component
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+AE1.1
+<END>
+
+<BEGIN> BUILD__CURRENT
+SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.debug_TC
+::tc_PENTIUM2gnu.debug
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_DEFAULTFORCPU
+0
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CFLAGS_AS
+-mcpu=pentiumpro \
+ -march=pentiumpro \
+ -ansi \
+ -O2 \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM2
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RELEASE
+1
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUM2gnu.release_TC
+::tc_PENTIUM2gnu.release
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AR
+arpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_AS
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CC
+ccpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CFLAGS_AS
+-mcpu=pentium \
+ -march=pentium \
+ -ansi \
+ -g \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPP
+ccpentium -E -P
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_CPPFILT
+c++filtpentium --strip-underscores
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD
+ldpentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDFLAGS
+-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LDPARTIAL
+ccpentium \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_NM
+nmpentium -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_MACRO_SIZE
+sizepentium
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu.debug_TC
+::tc_PENTIUMgnu.debug
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_CURRENT_TARGET
+default
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_DEFAULTFORCPU
+1
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_infoTags
+toolMacro objects
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_objects
+compConfig.o
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_FILE_$(PRJ_DIR)/compConfig.c_toolMacro
+CC
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AR
+arsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_AS
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CC
+ccsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS
+-ansi \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -g \
+ -fno-builtin \
+ -MD \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -I$(PRJ_DIR)/../.. \
+ -I$(PRJ_DIR)/../../../include \
+ -I$(PRJ_DIR)/../../../include_auto \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CFLAGS_AS
+-ansi \
+ -g \
+ -fno-builtin \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -Wall \
+ -I. \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=SIMSPARCSOLARIS
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPP
+ccsparc -E -P
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_CPPFILT
+c++filtsparc --strip-underscores
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD
+ldsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDFLAGS
+-T $(WIND_BASE)/target/config/solaris/linker-script
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LDPARTIAL
+ccsparc \
+ -B$(WIND_BASE)/host/$(WIND_HOST_TYPE)/lib/gcc-lib/ \
+ -nostdlib \
+ -r \
+ -Wl,-X
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_LD_PARTIAL_FLAGS
+-nostdlib -r
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_NM
+nmsparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_GENERATE_DEPENDENCY_FILE
+-MD
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_MACRO_SIZE
+sizesparc
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RELEASE
+0
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_RO_DEPEND_PATH
+$(WIND_BASE)/target/h/
+<END>
+
+<BEGIN> BUILD_SIMSPARCSOLARISgnu.debug_TC
+::tc_SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUM2gnu.debug PENTIUM2gnu.release PENTIUMgnu.debug SIMSPARCSOLARISgnu.debug
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/../__DB_APPLICATION_NAME__.c \
+ $(PRJ_DIR)/compConfig.c
+<END>
+
+<BEGIN> WCC__CDF_PATH
+$(PRJ_DIR)
+<END>
+
+<BEGIN> WCC__CURRENT
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__LIST
+PENTIUM2gnu.debug
+<END>
+
+<BEGIN> WCC__MXR_LIBS
+lib$(CPU)$(TOOL)vx.a
+<END>
+
+<BEGIN> WCC__OBJS_PATH
+$(WIND_BASE)/target/lib/obj$CPU$TOOLvx
+<END>
+
diff --git a/db/dist/vx_buildcd b/db/dist/vx_buildcd
new file mode 100755
index 000000000..b6135711e
--- /dev/null
+++ b/db/dist/vx_buildcd
@@ -0,0 +1,119 @@
+#!/bin/sh
+# Id: vx_buildcd,v 1.5 2001/09/11 17:09:02 sue Exp
+#
+# Build the Setup SDK CD image on the VxWorks host machine.
+
+. ./RELEASE
+
+B=`pwd`
+B=$B/..
+D=$B/dist/vx_setup
+C=$D/db.CD
+Q=/export/home/sue/SetupSDK
+S=$Q/resource/mfg/setup
+W=sun4-solaris2
+
+symdoc=$D/docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+symdb=$D/target/src/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH
+rm -rf $D/docs $D/target
+mkdir $D/docs $D/target $D/target/src
+ln -s $B/docs $symdoc
+ln -s $B $symdb
+
+s=/tmp/__db_a
+t=/tmp/__db_b
+
+#
+# The old CD directory must not already exist; tell the user to remove it if it does.
+if test -d $C; then
+ echo "$C cannot exist."
+ echo "As root, execute 'rm -rf $C'"
+ echo "and then rerun the script"
+ exit 1
+fi
+
+#
+# Check for absolute pathnames in the project files.
+# That is bad, but Tornado insists on putting them in
+# whenever you add new files.
+#
+rm -f $t
+f=`find $B/build_vxworks -name \*.wpj -print`
+for i in $f; do
+ grep -l -- "$B" $i >> $t
+done
+if test -s $t; then
+ echo "The following files contain absolute pathnames."
+ echo "They must be fixed before building the CD image:"
+ cat $t
+ exit 1
+fi
+
+#
+# NOTE: We reuse the same sed script over several files.
+#
+cat <<ENDOFSEDTEXT > $s
+s/@DB_VERSION_MAJOR@/$DB_VERSION_MAJOR/g
+s/@DB_VERSION_MINOR@/$DB_VERSION_MINOR/g
+s/@DB_VERSION_PATCH@/$DB_VERSION_PATCH/g
+s#@DB_SETUP_DIR@#$D#g
+ENDOFSEDTEXT
+
+f=$D/setup.pool
+(sed -f $s $D/vx_setup.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/README.TXT
+(sed -f $s $D/README.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/CONFIG.TCL
+(sed -f $s $D/CONFIG.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+f=$D/filelist.demo
+(sed -f $s $D/vx_demofile.in) > $t
+ (echo "Building $f" && rm -f $f && cp $t $f)
+
+# Copy the Sleepycat specific files into the SetupSDK area.
+(cd $D && cp README.TXT $S)
+(cd $D && cp LICENSE.TXT $S)
+(cd $D && cp CONFIG.TCL $S/RESOURCE/TCL)
+(cd $D && cp SETUP.BMP $S/RESOURCE/BITMAPS)
+
+#
+# NOTE: The contents of LIB must be on one, long, single line.
+# Even preserving it with a \ doesn't work for htmlBook.
+#
+f=../docs/LIB
+(echo "Building $f" && rm -f $f)
+cat <<ENDOFLIBTEXT >> $f
+{BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH} {Sleepycat Software Berkeley DB} {<b>BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</b>} {<b><a href="./index.html">BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH</a></b>} {Sleepycat BerkeleyDB} {} {} {}
+ENDOFLIBTEXT
+
+#
+# Start generating the file list.
+f=$D/filelist.all
+
+#
+# Just put everything into the image. But we only want to find regular
+# files; we cannot have all the directories listed too.
+#
+# NOTE: This find is overly aggressive in getting files, particularly
+# for the 'target/src' files. We actually end up with 3 sets of the
+# documentation, the "real" ones in 'docs/BerkeleyDB*', the set found
+# via 'target/src/Berk*/docs' and the one found via our symlink in
+# 'target/src/Berk*/dist/vx_setup/docs/Berk*'.
+#
+# However, we waste a little disk space so that the expression below
+# is trivial and we don't have to maintain it as new files/directories
+# are added to DB.
+#
+(cd $D && find docs/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name \* -type f -print) > $t
+(cd $D && find target/src/BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR.$DB_VERSION_PATCH/ -follow -name docs -prune -o -type f -print) >> $t
+(echo "Building $f" && rm -f $f && cp $t $f)
+#
+# Finally build the CD image!
+#
+env PATH=$Q/$W/bin:$PATH QMS_BASE=$Q WIND_HOST_TYPE=$W \
+pool mfg -d $C -v -nokey BerkeleyDB.$DB_VERSION_MAJOR.$DB_VERSION_MINOR < $D/setup.pool
diff --git a/db/dist/vx_setup/CONFIG.in b/db/dist/vx_setup/CONFIG.in
new file mode 100644
index 000000000..6ccceee70
--- /dev/null
+++ b/db/dist/vx_setup/CONFIG.in
@@ -0,0 +1,10 @@
+#
+# Install configuration file.
+#
+# Note: This file may be modified during the pool manufacturing process to
+# add additional configuration statements. This file is sourced by
+# INSTW32.TCL.
+#
+
+cdromDescSet "Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@"
+
diff --git a/db/dist/vx_setup/LICENSE.TXT b/db/dist/vx_setup/LICENSE.TXT
new file mode 100644
index 000000000..92600c424
--- /dev/null
+++ b/db/dist/vx_setup/LICENSE.TXT
@@ -0,0 +1,3 @@
+Copyright (c) 1996-2001
+ Sleepycat Software. All rights reserved.
+See the file LICENSE for redistribution information.
diff --git a/db/dist/vx_setup/MESSAGES.TCL b/db/dist/vx_setup/MESSAGES.TCL
new file mode 100644
index 000000000..718a67fbc
--- /dev/null
+++ b/db/dist/vx_setup/MESSAGES.TCL
@@ -0,0 +1,651 @@
+# MESSAGES.TCL - All setup strings.
+
+# modification history
+# --------------------
+# 03q,20apr99,bjl added release notes message for backward compatibility
+# page.
+# 03p,12apr99,wmd Add word about simulator in message about the drivers
+# object product.
+# 03o,03mar99,tcy Adjust setup directory size based on platform (fix for
+# SPR 25228)
+# 03n,24feb99,tcy modified DLL update messages
+# 03m,22feb99,tcy modified to align messages
+# 03l,17feb99,tcy modified message in the finish page for program group
+# installation
+# 03k,11feb99,tcy added messages for backward compatibility page
+# 03j,25jan99,tcy added messages from INSTW32.TCL
+# 03i,25jan99,wmd Reword the message for 5010_DRIVERS_INFO.
+# 03h,09dec98,bjl added messages about manufacturers updating patches.
+# 03g,01dec98,wmd Fix typos.
+# 03f,23nov98,tcy warn user to disable virus protection on Welcome screen
+# 03e,19nov98,wmd fixed minor nits in wording.
+# 03d,19nov98,bjl added web site locations for patchinfo.
+# 03c,18nov98,bjl added formatted patch messages for patchinfo file.
+# 03b,12nov98,tcy added message for not saving installation key
+# 03a,10nov98,tcy added warning message for space in destination directory
+# removed message for checking temporary disk space
+# 02z,27oct98,bjl added recommended patch messages, modified required msg.
+# 02y,26oct98,tcy added message for checking temporary disk space
+# 02x,22oct98,wmd fix messages for clarity.
+# 02w,21oct98,wmd fix message for drv/obj.
+# 02v,20oct98,tcy added message for updating system and changed dcom message
+# 02u,20oct98,bjl added tornado registry name entry message.
+# 02t,19oct98,bjl added tornado registry description message.
+# 02s,16oct98,wmd add new message for driver product warning.
+# 02r,16oct98,wmd fixed README.TXT description.
+# 02q,12oct98,tcy removed extraneous "the" from messages
+# 02p,06oct98,tcy added CD description to Welcome page
+# 02o,29sep98,bjl added required patches message 5000_PATCHES_TEXT.
+# 02n,29sep98,wmd add text for readme page
+# 02m,29sep98,tcy refined DLL registration page text
+# 02l,29sep98,tcy changed message for DCOM
+# 02k,26sep98,tcy added messages for DLL and DCOM pages
+# 02j,24sep98,tcy removed "following" from 1080_WARN_4 message.
+# 02i,17sep98,tcy added comment on size of SETUP files to 1140_COMP_SELECT.
+# 02h,17sep98,wmd reword message 1080_WARN_4.
+# 02g,14sep98,tcy changed 1210_FINISH and 1550_USAGE messages
+# 02f,08sep98,tcy warn user library update may take several minutes
+# 02e,01sep98,wmd reword message for installing over tree.
+# added new messages for license agreement pages.
+# 02d,20aug98,wmd added message for license agreement.
+# 02c,18aug98,tcy added message for zip-file dialog box
+# 02d,04aug98,wmd added newer/older duplicate file warnings.
+# 02c,24jul98,tcy added system check messages
+# 02b,16jul98,wmd add new messages for T-2.
+# 02a,22jul98,tcy moved license messages to LICW32.TCL;
+# removed portMapper messages
+# 01n,09feb98,pdn updated string 1080_WARN_4
+# 01m,08apr97,pdn added new string for remote icon installing
+# fixed spr#8334
+# 01l,08mar97,tcy fixed language in string id 3340
+# 01k,07mar97,tcy added string id 3340
+# 01j,10feb97,pdn added more license messages.
+# 01i,09feb97,pdn implemented variable argument list for strTableGet(),
+# clean up.
+# 01h,17jan97,jmo fixed language in strings
+# 01g,12dec96,tcy merged in TEXT-only strings
+# 01f,12dec96,pdn added 1080_WARN_4 string warning that CD-ROM
+# revision is older than expected.
+# 01e,27nov96,sj added string for warning against installing in
+# the root of windows drive.
+# 01d,18nov96,tcy added strings for text-based installation script
+# 01c,14nov96,pdn substituted function for some global variables
+# 01b,14nov96,sj added strings from Windows installation script
+# 01a,11nov96,pdn written
+
+proc strTableGet {strId args} {
+ global strTable
+ global setupVals
+ global current_file
+
+ if [regexp {^format.*$} $strTable($strId) junk] {
+ return [eval $strTable($strId)]
+ } {
+ return $strTable($strId)
+ }
+}
+
+set strTable(1000_WELCOME_CD) \
+ "format %s \"[cdNameGet description]\""
+
+set strTable(1000_WELCOME1) \
+ "format %s \"Welcome to the SETUP program. This program will\
+ install \[cdromDescGet\] on your computer.\""
+
+set strTable(1010_WELCOME2) \
+ "It is strongly recommended that you exit all programs and disable virus\
+ protection before running this SETUP program."
+
+set strTable(1020_WELCOME3) \
+ "At any time, you can quit the SETUP program by clicking the <Cancel>\
+ button. You also can go back to previous dialog boxes by clicking the\
+ <Back> button. To accept the current settings for a dialog box and go on\
+ with the installation process, click the <Next> button."
+
+set strTable(3020_WELCOME3) \
+    "format %s \"At any prompt, you can cancel the installation of \[cdromDescGet\]\
+ by typing \'exit\'. You can also go to the previous question\
+ by typing \'-\'. To accept current settings and go on with\
+ the installation process, press <Return>.\""
+
+set strTable(1030_WELCOME4) \
+ "WARNING: This program is protected by copyright law and international\
+ treaties."
+
+set strTable(1040_WELCOME5) \
+ "Unauthorized reproduction or distribution of this program, or any portion\
+ of it, may result in severe civil and criminal penalties, and will be\
+ prosecuted to the maximum extent possible under law."
+
+set strTable(1050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\] is not\
+    recommended. We suggest that you log off and log on as a normal\
+ user before running this program.\
+ \n\nClick Next to continue with SETUP anyway.\""
+
+set strTable(3050_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] as \[setupId effective user\]\
+    is not recommended. We suggest that you log off and \
+    log on as a normal user before running this program.\
+ \n\nPress <Return> to continue with SETUP anyway.\""
+
+set strTable(1051_ROOT_WARN) \
+ "format %s \"Installing \[cdromDescGet\] without System Administrator\
+ privileges is not recommended. Under your present privileges,\
+ SETUP will not offer certain installation options, such as \
+ the installation of some services, etc. Also, the software\
+ will be installed as a personal copy and will not be visible\
+ to other users on this machine.\
+ \n\nTo install \[cdromDescGet\] with access to all its\
+ installation features and options, we suggest that you exit\
+ the installation now and rerun it later with System\
+ Administrator\'s privileges.\n\nClick <Next> to continue with\
+ SETUP anyway.\""
+
+set strTable(1060_REGISTRATION) \
+    "Below, type your name and the name of your company."
+
+set strTable(1070_WARN_1) \
+ "The installation key you entered is invalid. Please enter a valid\
+ installation key."
+
+set strTable(1071_WARN_1) \
+ "Please enter the requested information."
+
+set strTable(1080_WARN_2) \
+ "You entered a key that was not created for this CD-ROM. Please verify\
+ that you are using the appropriate key. If this problem persists, contact\
+ Wind River Systems Sales department for help."
+
+set strTable(1080_WARN_3) \
+    "The installation key you entered is meant for another vendor's CD-ROM.\
+ Please contact the vendor who issued the CD-ROM for a proper key."
+
+set strTable(1085_WARN_4) \
+ "This CD-ROM does not require an installation key. Click the \"Next\"\
+ button to continue the installation."
+
+set strTable(1090_WARN_3) \
+ "format %s \"Can\'t initiate SETUP: \[lindex \$args 0\]. Please correct\
+ the problem then run SETUP again.\""
+
+set strTable(1095_WARN_NO_TCPIP) \
+ "SETUP has detected that your system does not have TCP-IP installed.\
+ To correct the problem, please contact your administrator and then\
+ run SETUP again.\nAborting setup."
+
+set strTable(1097_WARN_NO_LONGFILENAME_SUP) \
+ "SETUP has detected that your system does not have long filename\
+ support. To correct the problem, please contact your administrator\
+ and then run SETUP again.\nAborting setup."
+
+set strTable(1105_FULL_INSTALL) \
+ "Installs the Tornado products, tools, compilers, and other optional\
+ components that you may have purchased."
+
+set strTable(1107_PROGRAM_GROUP) \
+"Installs only the Tornado program group and tools icons for access to\
+ Tornado tools installed on a remote server."
+
+set strTable(1100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP to\
+ install \[cdromDescGet\].\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(1100_REMOTE_DIR) \
+ "format %s \"Please type the name of the directory where Tornado has\
+ already been installed.\
+ \n\nClick the <Browse> button to choose the directory\
+ interactively.\""
+
+set strTable(3100_DEST_DIR) \
+ "format %s \"Please type the name of the directory where you want SETUP\
+ to install \[cdromDescGet\].\""
+
+set strTable(1110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist.\
+ \nDo you want to create it now?"
+
+set strTable(3110_DEST_DIR_WARN) \
+ "The installation directory you entered does not exist."
+
+set strTable(3115_DEST_DIR_QUESTION) \
+ "Do you want to create it now? \[y\]"
+
+set strTable(1111_DEST_DIR_WARN) \
+ "format %s \"Installing \[cdromDescGet\] in the root directory is not\
+ recommended.\nClick <Yes> to select another directory.\""
+
+set strTable(1120_DEST_DIR_WARN2) \
+ "format %s \"Creating \[destDirGet\] failed: file exists.\""
+
+set strTable(1121_DEST_DIR_WARN2) \
+ "format %s \"Installing in \[destDirGet\] is not recommended.\
+ \nDo you want to change the installation directory?\""
+
+set strTable(1122_DEST_DIR_WARN2) \
+ "format %s \"Unable to create \[destDirGet\].\""
+
+set strTable(1130_DEST_DIR_WARN3) \
+ "You do not have permission to write files into the installation directory\
+ you entered.\
+ \n\nPlease choose a writable directory."
+
+set strTable(1135_DEST_DIR_WARN4) \
+ "format %s \"The installation directory you entered contains white\
+ space(s). Please select another directory.\""
+
+set strTable(1137_DUP_PRODUCT_WARN) \
+ "format %s \"Reinstalling products may potentially destroy any\
+ modifications you may have made to previously installed files.\
+ Do you wish to continue with the installation or go back to the\
+ '\[strTableGet 1450_TITLE_OPTION\]' page to reconsider your choices?\""
+
+set strTable(3155_COMP_SELECT_QUESTION) \
+ "Do you want to go back and specify a directory on a bigger partition?\
+ \[y\]"
+
+set strTable(1140_COMP_SELECT) \
+ "format %s \"In the option list below, please check all items you wish\
+ to install. SETUP files will be copied to your selected directory and\
+ take up \[setupSizeGet\] MB of disk space.\n\""
+
+set strTable(3140_COMP_SELECT) \
+ "In the option list below, select the item(s) you want to install."
+
+set strTable(3145_COMP_SELECT_CHANGE) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(3145_COMP_SELECT_CHANGE_INVALID) \
+ "The item number(s) you entered is not valid."
+
+set strTable(1150_COMP_SELECT_WARN) \
+ "There is not enough disk space to install the selected component(s).\
+ \n\nDo you want to go back and specify a directory on a bigger disk or\
+ partition?"
+
+set strTable(3150_COMP_SELECT_WARN) \
+ "There is not enough space to install the selected component(s)."
+
+set strTable(1151_COMP_SELECT_WARN) \
+ "At least one component must be selected to continue installation."
+
+set strTable(1160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested.\
+ \n\nThe selected button(s) below indicate the file permissions which\
+ will be set during the installation process.\
+ \n\nPlease adjust these to suit your site requirements."
+
+set strTable(3160_PERMISSION) \
+ "SETUP is about to install the component(s) you have requested."
+
+set strTable(3162_PERMISSION) \
+ "The list below indicates the file permissions which will be set during\
+ the installation process. Please adjust these to suit your site\
+ requirements."
+
+set strTable(3165_PERMISSION_QUESTION) \
+ "Press <Return> to accept the setting. To change the setting, enter a\
+ list of item numbers separated by spaces."
+
+set strTable(1161_FOLDER_SELECT) \
+ "SETUP will add program icons to the Program Folder listed below. You may\
+ type a new folder name, or select one from the existing Folders list."
+
+set strTable(1162_FOLDER_SELECT) \
+ "Please enter a valid folder name."
+
+set strTable(1170_FILE_COPY) \
+ "format %s \"SETUP is copying the selected component(s) to the directory\
+ \[destDirGet\].\""
+
+set strTable(1171_FILE_COPY) \
+ "format %s \"SETUP cannot read \[setupFileNameGet 0\] from the CD-ROM.\
+ Please ensure that the CD-ROM is properly mounted.\""
+
+set strTable(1180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries. We recommend that you let\
+ SETUP finish this step, or the libraries will be in an inconsistent\
+ state. Please be patient as the process may take several minutes. \
+ If you want to quit the SETUP program, click <Cancel> and run\
+ the SETUP program again at a later time."
+
+set strTable(3180_LIB_UPDATE) \
+ "SETUP is updating the VxWorks libraries."
+
+set strTable(1190_REGISTRY_HOST) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host.\
+ \n\nPlease enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1191_REGISTRY_DESC) \
+ "The Tornado Registry is a daemon that keeps track of all available\
+ targets by name. Only one registry is required on your network, \
+ and it can run on any networked host."
+
+set strTable(1192_REGISTRY_NAME) \
+ "Please enter the name of the host where the Tornado Registry will\
+ be running."
+
+set strTable(1200_FINISH_WARN) \
+    "format %s \"However, there were \[errorCountGet\] error(s) which occurred\
+ during the process. Please review the log file\
+ \[destDirDispGet\]/setup.log for more information.\""
+
+set strTable(1210_FINISH) \
+ "format %s \"SETUP has completed installing the selected product(s).\""
+
+set strTable(1212_FINISH) \
+ "SETUP has completed installing the program folders and icons."
+
+set strTable(1213_FINISH) \
+ "Terminating SETUP program."
+
+set strTable(1360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run\
+ the SETUP program at a later time to complete the\
+ installation.\
+ \n\nTo continue installing the program, click <Resume>. \
+ To quit the SETUP program, click <Exit SETUP>.\""
+
+set strTable(3360_QUIT_CALLBACK) \
+ "format %s \"SETUP is not complete. If you quit the SETUP program now,\
+ \[cdromDescGet\] will not be installed.\n\nYou may run the\
+ SETUP program at a later time to complete the installation.\
+ \n\nTo continue installing the program, Press <Return>. \
+ To quit the SETUP program, type \'exit\'.\""
+
+set strTable(1370_FILE_ACCESS_ERROR) \
+ "format %s \"SETUP cannot create/update file \[lindex \$args 0\]:\
+ \[lindex \$args 1\]\""
+
+set strTable(1380_DEFLATE_ERROR) \
+ "format %s \"SETUP isn\'t able to deflate \[setupFileNameGet 0\]\
+ \n\nPlease select one of the following options\
+ to continue with the SETUP process.\""
+
+set strTable(1390_MEMORY_LOW) \
+ "The system is running out of memory. To continue, close applications\
+ or increase the system swap space."
+
+set strTable(1400_DISK_FULL) \
+ "No disk space left. To continue, free up some disk space."
+
+set strTable(1550_USAGE) \
+    "Usage: SETUP \[/I\[con\]\]\t\n\
+ /I : Add standard Tornado icons \n\
+ from a remote installation"
+
+set strTable(1410_TITLE_WELCOME) "Welcome"
+set strTable(1420_TITLE_WARNING) "Warning"
+set strTable(1430_TITLE_REGISTRATION) "User Registration"
+set strTable(1440_TITLE_DESTDIR) "Select Directory"
+set strTable(1450_TITLE_OPTION) "Select Products"
+set strTable(1460_TITLE_PERMISSION) "Permission"
+set strTable(1470_TITLE_FILECOPY) "Copying Files"
+set strTable(1480_TITLE_LIBUPDATE) "Update Libraries"
+set strTable(1490_TITLE_REGISTRY_HOST) "Tornado Registry"
+set strTable(1495_TITLE_BACKWARD_COMPATIBILITY) "Backward Compatibility"
+set strTable(1500_TITLE_FINISH) "Finish"
+set strTable(1560_TITLE_FOLDER) "Select Folder"
+set strTable(1563_TITLE_DLL_REG) "Software Registration"
+set strTable(1567_TITLE_DCOM) "DCOM Installation"
+
+set strTable(1570_OPTION_SELECT) \
+ "Choose one of the options listed below, then click the\
+ <Next> button to continue the installation."
+
+set strTable(1576_OPTION_MANUAL) \
+ "Install Tornado Registry manually"
+
+set strTable(1577_OPTION_STARTUP) \
+ "Install Tornado Registry locally in the Startup Group"
+
+set strTable(1578_OPTION_SERVICE) \
+ "Install Tornado Registry locally as a Service"
+
+set strTable(1579_OPTION_REMOTE) \
+ "Configure to use a remote Tornado Registry"
+
+set strTable(1580_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group or as an\
+ NT Service. For more information, consult your Tornado User\'s Guide."
+
+set strTable(1581_OPTION_DESC) \
+ "If you plan on running Tornado in a non-networked environment, we\
+ recommend that you install the registry in your Startup Group. For more\
+ information, consult your Tornado User\'s Guide."
+
+set strTable(3000_RETURN_QUESTION) \
+ "Press <Return> to continue"
+
+set strTable(3055_EXIT_QUESTION) \
+ "Type \'exit\' to quit the program or press <Return> to continue"
+
+set strTable(3370_BACK_CALLBACK) \
+ "Cannot go back further."
+
+set strTable(1080_WARN_4) \
+ "The installation key you entered attempted to unlock one or more \
+ products that may have been removed from our product line. \
+ Please compare the unlocked product list on the\
+    \"[strTableGet 1450_TITLE_OPTION]\" screen with your purchase order\
+ list, and contact us if you discover any differences."
+
+set strTable(4000_BASE_INSTALL_WARN) \
+ "format %s \"Warning! Re-installing Tornado over an existing \
+ tree will overwrite any installed patches. \
+ If you proceed with the installation, please \
+ re-install patches if any.\""
+
+set strTable(4000_BASE_INSTALL_WARN_1) \
+ "Select <Install> to overwrite existing Tornado installation,\
+ or choose <Select Path> to enable you to back up to the \'Select\
+ Directory\' page to enter an alternate path."
+
+set strTable(4010_FILE_EXISTS_OLDER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is older. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_NEWER_WARN) \
+ "format %s \"The file \'\$current_file\' exists in your destination\
+ directory path \'\[destDirGet\]\' and is newer. You can\
+ set the policy for handling duplicate files by\
+ selecting one of the following buttons. All files to be\
+ overwritten will be backed up.\""
+
+set strTable(4010_FILE_EXISTS_WARN_1) \
+ "Overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_2) \
+ "Do not overwrite the existing file."
+
+set strTable(4010_FILE_EXISTS_WARN_3) \
+ "Overwrite ALL files, do not show this dialog again."
+
+set strTable(4020_ANALYZING_BANNER) \
+ "Analyzing installation files, please wait..."
+
+set strTable(4030_NO_ZIP_FILE) \
+ "format %s \"SETUP cannot find the ZIP files for installing\
+ \[cdromDescGet\] in the default directory.\n\n\
+ Please type the name of the WIND\
+ directory containing the ZIP files.\n\nClick the\
+ <Browse> button to choose the directory interactively.\""
+
+set strTable(4040_LIC_TEXT) \
+ "Attention: By clicking on the \"I accept\" button or by\
+    installing the software, you are consenting to be bound by\
+ the terms of this agreement (this \"Agreement\"). If you do\
+ not agree to all of the terms, click the \"I don't Accept\" button\
+ and do not install this software. A copy of this Agreement can be viewed\
+ in the Setup directory under the destination path that you have\
+ designated after the installation is completed."
+
+set strTable(4050_PROJECT_TEXT) \
+ "Please enter your project name, and the number of licensed\
+ users on the project in the spaces below."
+
+set strTable(4060_LICENSE_TEXT) \
+ "By clicking on the \"I accept\" button \
+ you are consenting to be bound by the terms of this agreement.\
+ If you do not agree to all of the terms, click the \"Cancel\"\
+ button and do not install this software."
+
+set strTable(4070_DLL_TEXT) \
+ "SETUP is registering software on your machine. This will take a few\
+ minutes."
+
+set strTable(4080_DCOM_TEXT) \
+ "Setup has detected that your COM/DCOM DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+ programs before proceeding.\
+ \n\nWould you like to install \"DCOM95\" now?"
+
+set strTable(4082_DCOM95_AND_COMCTL_TEXT) \
+ "Setup has detected that your COM/DCOM and Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You must reboot your system after DLL files have been\
+ installed. After rebooting, please rerun SETUP to continue with\
+ installation.\
+ \n\n\
+ Note: 401comupd.exe and DCOM95 installation programs update your\
+ system DLLs. You should save all open documents and close all\
+    programs before proceeding.\
+ \n\nWould you like to install \"401comupd.exe\" and \"DCOM95\" now?"
+
+set strTable(4085_COMCTL_UPDATE_TEXT) \
+ "Setup has detected that your Common Control DLLs must\
+ be updated for the correct operation of Tornado 2.0.\
+ \n\n\
+ Setup will now ask you to run DCOM95 and 401comupd.exe to update your\
+ DLLs.\
+ \n\n\
+ You will have to reboot your system after DLL files have been\
+ installed. Please rerun SETUP to continue with installation\
+ after your system has rebooted.\
+ \n\n\
+ Note: The 401comupd.exe installation program updates your system DLLs. You\
+ should save all open documents and close all programs before installing\
+ 401comupd.exe.\
+ \n\nWould you like to install \"401comupd.exe\" now?"
+
+set strTable(4090_README_TEXT) \
+ "Please read the README file contents that are displayed below.\
+ It contains important information that will enable you to install\
+ and successfully run the BerkeleyDB product. For your convenience\
+ this file is copied to your installation directory path."
+
+set strTable(5000_PATCHES_REQUIRED_TEXT) \
+ "SETUP has detected that required operating system patches\
+ have not been installed on this machine. These patches are\
+ necessary for the correct operation of SETUP and Tornado. Please refer\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\
+ you can continue with installation:\n\n"
+
+set strTable(5001_PATCHES_RECOMMENDED_TEXT) \
+ "\n\nSETUP has also detected that recommended operating system patches\
+ have not been installed. It is recommended that these patches are\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5002_PATCHES_RECOMMENDED_TEXT) \
+ "SETUP has detected that some operating system patches have not been\
+ installed on this machine. It is recommended that these\
+ patches are installed before starting Tornado to ensure correct\
+ operation. Please refer to the Tornado Release Notes\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5003_PATCHES_REQUIRED_FORMATTED_TEXT) \
+ "\n SETUP has detected that required operating system patches\n\
+ have not been installed on this machine. These patches are\n\
+ necessary for the correct operation of SETUP and Tornado. Please refer\n\
+ to the Tornado Release Notes for details.\n\n\
+ The following operating system patches must be installed before\n\
+ you can continue with installation:\n\n"
+
+set strTable(5004_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n\n SETUP has also detected that recommended operating system patches\n\
+ have not been installed. It is recommended that these patches are\n\
+ installed before starting Tornado to ensure correct operation.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5005_PATCHES_RECOMMENDED_FORMATTED_TEXT) \
+ "\n SETUP has detected that some operating system patches have not been\n\
+ installed on this machine. It is recommended that these\n\
+ patches are installed before starting Tornado to ensure correct\n\
+ operation. Please refer to the Tornado Release Notes\n\
+ for details.\n\n\
+ The following operating system patches are recommended to be installed:\n\n"
+
+set strTable(5006_PATCHES_SUN_LOCATION) \
+ "\nPatches for Sun machines are available at http://sunsolve.sun.com.\n"
+
+set strTable(5007_PATCHES_HP_LOCATION) \
+ "\nPatches for HP machines are available at:\n\
+ http://us-support.external.hp.com (US, Canada, Asia-Pacific, and\
+ Latin-America)\n\
+ http://europe-support.external.hp.com (Europe)\n"
+
+set strTable(5008_PATCHES_UPDATE) \
+ "\nNote: System vendors very frequently update and replace patches.\
+ If a specific patch is no longer available, please use the\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5009_PATCHES_UPDATE_FORMATTED) \
+ "\n Note: System vendors very frequently update and replace patches.\n\
+ If a specific patch is no longer available, please use the\n\
+ replacement patch suggested by the system vendor.\n"
+
+set strTable(5010_DRIVERS_INFO) \
+ "The installation of the Driver component is required because\n\
+ you have selected the basic Tornado product for installation.\n\n\
+    If you wish to uncheck this item, you must uncheck either the\n\
+ basic Tornado and/or Tornado Simulator product(s) or go to the\n\
+ 'Details' button for Tornado and uncheck both the Simulator and\n\
+ the Tornado Object parts."
+
+set strTable(5020_DO_NOT_SAVE_KEY_FOR_FAE) \
+ "The installation key you are about to enter will NOT\
+ be saved in the system registry.\nIs this what you want?"
+
+set strTable(5030_BACKWARD_COMPATIBILITY) \
+ "While the portmapper is not needed for Tornado 2.0, it is\
+ included in this release for development environments in\
+ which both Tornado 2.0 and Tornado 1.0.1 are in use.\
+ \n\nWould you like to use your Tornado 1.0.x tools with Tornado 2.0?"
+
+set strTable(5040_BACKWARD_COMPATIBILITY) \
+ "Note:\
+ \n\nIf you have selected to install the Tornado Registry as\
+ a service, there is no way to retain backward compatibility\
+ with Tornado 1.0.x."
+
+set strTable(5050_BACKWARD_COMPATIBILITY) \
+ "For more information on backward compatibility,\
+ please consult the Tornado 2.0 Release Notes."
diff --git a/db/dist/vx_setup/README.in b/db/dist/vx_setup/README.in
new file mode 100644
index 000000000..f96948c37
--- /dev/null
+++ b/db/dist/vx_setup/README.in
@@ -0,0 +1,7 @@
+README.TXT: Sleepycat Software Berkeley DB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@ Release v@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+
+Information on known problems, changes introduced with the
+current revision of the CD-ROM, and other product bulletins
+can be obtained from the Sleepycat Software web site:
+
+ http://www.sleepycat.com/
diff --git a/db/dist/vx_setup/SETUP.BMP b/db/dist/vx_setup/SETUP.BMP
new file mode 100644
index 000000000..2918480b8
--- /dev/null
+++ b/db/dist/vx_setup/SETUP.BMP
Binary files differ
diff --git a/db/dist/vx_setup/vx_allfile.in b/db/dist/vx_setup/vx_allfile.in
new file mode 100644
index 000000000..4d505cf38
--- /dev/null
+++ b/db/dist/vx_setup/vx_allfile.in
@@ -0,0 +1,5 @@
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wpj
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/BerkeleyDB.wsp
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db.h
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_config.h
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/db_int.h
diff --git a/db/dist/vx_setup/vx_demofile.in b/db/dist/vx_setup/vx_demofile.in
new file mode 100644
index 000000000..e92c116b8
--- /dev/null
+++ b/db/dist/vx_setup/vx_demofile.in
@@ -0,0 +1,3 @@
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/demo/DBdemo.wpj
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/demo/README
+target/src/BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@/build_vxworks/demo/dbdemo.c
diff --git a/db/dist/vx_setup/vx_setup.in b/db/dist/vx_setup/vx_setup.in
new file mode 100644
index 000000000..7bc3f510c
--- /dev/null
+++ b/db/dist/vx_setup/vx_setup.in
@@ -0,0 +1,13 @@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.all
+BerkeleyDB.@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@-Demo
+@DB_SETUP_DIR@
+BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@ Demo program
+demo-db@DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
+@DB_SETUP_DIR@/filelist.demo
+Sleepycat Software BerkeleyDB @DB_VERSION_MAJOR@.@DB_VERSION_MINOR@.@DB_VERSION_PATCH@
diff --git a/db/dist/win_exports.in b/db/dist/win_exports.in
new file mode 100644
index 000000000..65b799cb9
--- /dev/null
+++ b/db/dist/win_exports.in
@@ -0,0 +1,126 @@
+# Id: win_exports.in,v 1.9 2001/10/11 17:05:51 dda Exp
+
+# Standard interfaces.
+ db_create
+ db_env_create
+ db_strerror
+ db_version
+ db_xa_switch
+ log_compare
+ txn_abort
+ txn_begin
+ txn_commit
+
+# Library configuration interfaces.
+ db_env_set_func_close
+ db_env_set_func_dirfree
+ db_env_set_func_dirlist
+ db_env_set_func_exists
+ db_env_set_func_free
+ db_env_set_func_fsync
+ db_env_set_func_ioinfo
+ db_env_set_func_malloc
+ db_env_set_func_map
+ db_env_set_func_open
+ db_env_set_func_read
+ db_env_set_func_realloc
+ db_env_set_func_rename
+ db_env_set_func_seek
+ db_env_set_func_sleep
+ db_env_set_func_unlink
+ db_env_set_func_unmap
+ db_env_set_func_write
+ db_env_set_func_yield
+
+# These are only for testing.
+ __db_dbm_close
+ __db_dbm_delete
+ __db_dbm_fetch
+ __db_dbm_firstkey
+ __db_dbm_init
+ __db_dbm_nextkey
+ __db_dbm_store
+ __db_hcreate
+ __db_hdestroy
+ __db_hsearch
+ __db_loadme
+ __db_ndbm_clearerr
+ __db_ndbm_close
+ __db_ndbm_delete
+ __db_ndbm_dirfno
+ __db_ndbm_error
+ __db_ndbm_fetch
+ __db_ndbm_firstkey
+ __db_ndbm_nextkey
+ __db_ndbm_open
+ __db_ndbm_pagfno
+ __db_ndbm_rdonly
+ __db_ndbm_store
+ __db_panic
+ __db_r_attach
+ __db_r_detach
+ __db_tas_mutex_init
+ __db_tas_mutex_lock
+ __db_tas_mutex_unlock
+ __ham_func2
+ __ham_func3
+ __ham_func4
+ __ham_func5
+ __ham_test
+ __lock_dump_region
+ __memp_dump_region
+ __os_calloc
+ __os_closehandle
+ __os_free
+ __os_freestr
+ __os_ioinfo
+ __os_malloc
+ __os_open
+ __os_openhandle
+ __os_read
+ __os_strdup
+ __os_umalloc
+ __os_write
+
+#These are needed for linking tools or java.
+ __bam_init_print
+ __bam_pgin
+ __bam_pgout
+ __crdel_init_print
+ __db_dispatch
+ __db_dump
+ __db_e_stat
+ __db_err
+ __db_getlong
+ __db_getulong
+ __db_global_values
+ __db_init_print
+ __db_inmemdbflags
+ __db_jump
+ __db_omode
+ __db_pgin
+ __db_pgout
+ __db_prdbt
+ __db_prfooter
+ __db_prheader
+ __db_rpath
+ __db_util_interrupted
+ __db_util_logset
+ __db_util_siginit
+ __db_util_sigresend
+ __db_verify_callback
+ __db_verify_internal
+ __ham_get_meta
+ __ham_init_print
+ __ham_pgin
+ __ham_pgout
+ __ham_release_meta
+ __log_init_print
+ __os_get_errno
+ __os_set_errno
+ __os_sleep
+ __os_ufree
+ __os_yield
+ __qam_init_print
+ __qam_pgin_out
+ __txn_init_print
diff --git a/db/dist/wpj.in b/db/dist/wpj.in
new file mode 100644
index 000000000..42ebb0bd6
--- /dev/null
+++ b/db/dist/wpj.in
@@ -0,0 +1,161 @@
+Document file - DO NOT EDIT
+
+<BEGIN> BUILD_PENTIUMgnu_BUILDRULE
+__DB_APPLICATION_NAME__.out
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AR
+ar386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_ARCHIVE
+$(PRJ_DIR)/PENTIUMgnu/__DB_APPLICATION_NAME__.a
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_AS
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CC
+cc386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -DRW_MULTI_THREAD \
+ -D_REENTRANT \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -I$(PRJ_DIR)/.. \
+ -I$(PRJ_DIR)/../../include \
+ -I$(PRJ_DIR)/../../include_auto \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CFLAGS_AS
+-g \
+ -mpentium \
+ -ansi \
+ -nostdinc \
+ -fvolatile \
+ -nostdlib \
+ -fno-builtin \
+ -fno-defer-pop \
+ -P \
+ -x \
+ assembler-with-cpp \
+ -I$(WIND_BASE)/target/h \
+ -DCPU=PENTIUM
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_CPP
+cc386 -E -P -xc
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD
+ld386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDDEPS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LDFLAGS
+-X -N
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_LD_PARTIAL_FLAGS
+-X -r
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_NM
+nm386 -g
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_DEFINE_MACRO
+-D
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_OPTION_INCLUDE_DIR
+-I
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_POST_BUILD_RULE
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_PRJ_LIBS
+
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_MACRO_SIZE
+size386
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_RO_DEPEND_PATH
+{$(WIND_BASE)/target/h/} \
+ {$(WIND_BASE)/target/src/} \
+ {$(WIND_BASE)/target/config/}
+<END>
+
+<BEGIN> BUILD_PENTIUMgnu_TC
+::tc_PENTIUMgnu
+<END>
+
+<BEGIN> BUILD_RULE_archive
+
+<END>
+
+<BEGIN> BUILD_RULE___DB_APPLICATION_NAME__.out
+
+<END>
+
+<BEGIN> BUILD_RULE_objects
+
+<END>
+
+<BEGIN> BUILD__CURRENT
+PENTIUMgnu
+<END>
+
+<BEGIN> BUILD__LIST
+PENTIUMgnu
+<END>
+
+<BEGIN> CORE_INFO_TYPE
+::prj_vxApp
+<END>
+
+<BEGIN> CORE_INFO_VERSION
+2.0
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependDone
+FALSE
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_dependencies
+
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_objects
+__DB_APPLICATION_NAME__.o
+<END>
+
+<BEGIN> FILE___DB_APPLICATION_NAME__.c_tool
+C/C++ compiler
+<END>
+
+<BEGIN> PROJECT_FILES
+$(PRJ_DIR)/__DB_APPLICATION_NAME__.c
+<END>
+
+<BEGIN> userComments
+__DB_APPLICATION_NAME__
+<END>
diff --git a/db/docs/api_c/env_set_timeout.html b/db/docs/api_c/env_set_timeout.html
new file mode 100644
index 000000000..c99234db9
--- /dev/null
+++ b/db/docs/api_c/env_set_timeout.html
@@ -0,0 +1,106 @@
+<!--Id: env_set_timeout.so,v 10.2 2001/09/07 21:43:16 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;set_timeout(DB_ENV *dbenv, db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;set_timeout function sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> and
+<a href="../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a> for more information.
+<p>The DB_ENV-&gt;set_timeout interface may be used only to configure Berkeley DB before
+the <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> interface is called.
+<p>The DB_ENV-&gt;set_timeout function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;set_timeout function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_c/env_open.html">DB_ENV-&gt;open</a> was called.
+</dl>
+<p>The DB_ENV-&gt;set_timeout function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;set_timeout function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_create.html">db_env_create</a>,
+<a href="../api_c/env_close.html">DB_ENV-&gt;close</a>,
+<a href="../api_c/env_err.html">DB_ENV-&gt;err</a>, <a href="../api_c/env_err.html">DB_ENV-&gt;errx</a>
+<a href="../api_c/env_open.html">DB_ENV-&gt;open</a>,
+<a href="../api_c/env_remove.html">DB_ENV-&gt;remove</a>,
+<a href="../api_c/env_set_alloc.html">DB_ENV-&gt;set_alloc</a>,
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a>,
+<a href="../api_c/env_set_errcall.html">DB_ENV-&gt;set_errcall</a>,
+<a href="../api_c/env_set_errfile.html">DB_ENV-&gt;set_errfile</a>,
+<a href="../api_c/env_set_errpfx.html">DB_ENV-&gt;set_errpfx</a>,
+<a href="../api_c/env_set_feedback.html">DB_ENV-&gt;set_feedback</a>,
+<a href="../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a>,
+<a href="../api_c/env_set_paniccall.html">DB_ENV-&gt;set_paniccall</a>,
+<a href="../api_c/env_set_rec_init.html">DB_ENV-&gt;set_recovery_init</a>,
+<a href="../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a>,
+<a href="../api_c/env_set_shm_key.html">DB_ENV-&gt;set_shm_key</a>,
+<a href="../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a>,
+<a href="../api_c/env_set_tmp_dir.html">DB_ENV-&gt;set_tmp_dir</a>,
+<a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a>,
+<a href="../api_c/env_set_verbose.html">DB_ENV-&gt;set_verbose</a>,
+<a href="../api_c/env_strerror.html">db_strerror</a>
+and
+<a href="../api_c/env_version.html">db_version</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
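
A minimal C sketch (not part of the patch) of the call sequence documented above: configure the lock and transaction timeouts, then open the environment. The home directory "/tmp/dbenv" and the one- and five-second values are illustrative assumptions.

    #include <stdio.h>
    #include <db.h>

    /*
     * Sketch: set lock and transaction timeouts (in microseconds) on a
     * DB_ENV handle.  DB_ENV->set_timeout must be called before
     * DB_ENV->open; the home directory and values are illustrative.
     */
    int
    open_env_with_timeouts(DB_ENV **dbenvp)
    {
        DB_ENV *dbenv;
        int ret;

        if ((ret = db_env_create(&dbenv, 0)) != 0) {
            fprintf(stderr, "db_env_create: %s\n", db_strerror(ret));
            return (ret);
        }

        if ((ret = dbenv->set_timeout(dbenv,
            1000000, DB_SET_LOCK_TIMEOUT)) != 0 ||
            (ret = dbenv->set_timeout(dbenv,
            5000000, DB_SET_TXN_TIMEOUT)) != 0) {
            dbenv->err(dbenv, ret, "DB_ENV->set_timeout");
            (void)dbenv->close(dbenv, 0);
            return (ret);
        }

        if ((ret = dbenv->open(dbenv, "/tmp/dbenv",
            DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
            DB_INIT_MPOOL | DB_INIT_TXN, 0)) != 0) {
            dbenv->err(dbenv, ret, "DB_ENV->open");
            (void)dbenv->close(dbenv, 0);
            return (ret);
        }

        *dbenvp = dbenv;
        return (0);
    }

Per the page above, the equivalent DB_CONFIG lines ("set_lock_timeout 1000000" and "set_txn_timeout 5000000") silently override values configured before DB_ENV->open.
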
diff --git a/db/docs/api_c/lock_id_free.html b/db/docs/api_c/lock_id_free.html
new file mode 100644
index 000000000..8dca5ca14
--- /dev/null
+++ b/db/docs/api_c/lock_id_free.html
@@ -0,0 +1,60 @@
+<!--Id: lock_id_free.so,v 10.1 2001/09/25 15:18:55 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;lock_id_free(DB_ENV *env, u_int32_t id);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;lock_id_free function frees a locker ID allocated by the
+DB_ENV-&gt;lock_id function.
+<p>The DB_ENV-&gt;lock_id_free function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DB_ENV-&gt;lock_id_free function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;lock_id_free function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lk_conflicts.html">DB_ENV-&gt;set_lk_conflicts</a>,
+<a href="../api_c/env_set_lk_detect.html">DB_ENV-&gt;set_lk_detect</a>,
+<a href="../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a>,
+<a href="../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a>,
+<a href="../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a>,
+<a href="../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a>,
+<a href="../api_c/lock_get.html">DB_ENV-&gt;lock_get</a>,
+<a href="../api_c/lock_id.html">DB_ENV-&gt;lock_id</a>,
+<a href="../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a>,
+<a href="../api_c/lock_put.html">DB_ENV-&gt;lock_put</a>,
+<a href="../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>,
+and
+<a href="../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
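
A minimal C sketch (not part of the patch) of the locker-ID lifecycle described above. The lock object name and the DB_LOCK_WRITE mode are illustrative, and dbenv is assumed to have been opened elsewhere with DB_INIT_LOCK.

    #include <string.h>
    #include <db.h>

    /*
     * Sketch: allocate a locker ID, take and release one write lock,
     * then free the ID.  Per the page above, DB_ENV->lock_id_free
     * returns EINVAL while the locker still holds locks, so the lock
     * must be released first.
     */
    int
    lock_once(DB_ENV *dbenv)
    {
        DBT obj;
        DB_LOCK lock;
        u_int32_t locker;
        int ret, t_ret;

        if ((ret = dbenv->lock_id(dbenv, &locker)) != 0)
            return (ret);

        memset(&obj, 0, sizeof(obj));
        obj.data = "example-object";            /* illustrative name */
        obj.size = sizeof("example-object") - 1;

        if ((ret = dbenv->lock_get(dbenv,
            locker, 0, &obj, DB_LOCK_WRITE, &lock)) == 0)
            ret = dbenv->lock_put(dbenv, &lock);

        /* Free the locker ID even if the lock calls failed. */
        if ((t_ret = dbenv->lock_id_free(dbenv, locker)) != 0 && ret == 0)
            ret = t_ret;
        return (ret);
    }
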
diff --git a/db/docs/api_c/log_cursor.html b/db/docs/api_c/log_cursor.html
new file mode 100644
index 000000000..0107ada24
--- /dev/null
+++ b/db/docs/api_c/log_cursor.html
@@ -0,0 +1,95 @@
+<!--Id: log_cursor.so,v 10.1 2001/09/28 15:09:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;log_cursor(DB_ENV *dbenv, DB_LOGC **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;log_cursor function
+creates a log cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_ENV-&gt;log_cursor function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;log_cursor function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_ENV-&gt;log_cursor function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;log_cursor function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/db_create.html">db_create</a>,
+<a href="../api_c/db_associate.html">DB-&gt;associate</a>,
+<a href="../api_c/db_close.html">DB-&gt;close</a>,
+<a href="../api_c/db_cursor.html">DB-&gt;cursor</a>,
+<a href="../api_c/db_del.html">DB-&gt;del</a>,
+<a href="../api_c/db_err.html">DB-&gt;err</a>, <a href="../api_c/db_err.html">DB-&gt;errx</a>
+<a href="../api_c/db_fd.html">DB-&gt;fd</a>,
+<a href="../api_c/db_get.html">DB-&gt;get</a>,
+<a href="../api_c/db_get.html">DB-&gt;pget</a>,
+<a href="../api_c/db_get_byteswapped.html">DB-&gt;get_byteswapped</a>,
+<a href="../api_c/db_get_type.html">DB-&gt;get_type</a>,
+<a href="../api_c/db_join.html">DB-&gt;join</a>,
+<a href="../api_c/db_key_range.html">DB-&gt;key_range</a>,
+<a href="../api_c/db_open.html">DB-&gt;open</a>,
+<a href="../api_c/db_put.html">DB-&gt;put</a>,
+<a href="../api_c/db_remove.html">DB-&gt;remove</a>,
+<a href="../api_c/db_rename.html">DB-&gt;rename</a>,
+<a href="../api_c/db_set_alloc.html">DB-&gt;set_alloc</a>,
+<a href="../api_c/db_set_append_recno.html">DB-&gt;set_append_recno</a>,
+<a href="../api_c/db_set_bt_compare.html">DB-&gt;set_bt_compare</a>,
+<a href="../api_c/db_set_bt_minkey.html">DB-&gt;set_bt_minkey</a>,
+<a href="../api_c/db_set_bt_prefix.html">DB-&gt;set_bt_prefix</a>,
+<a href="../api_c/db_set_cachesize.html">DB-&gt;set_cachesize</a>,
+<a href="../api_c/db_set_dup_compare.html">DB-&gt;set_dup_compare</a>,
+<a href="../api_c/db_set_errcall.html">DB-&gt;set_errcall</a>,
+<a href="../api_c/db_set_errfile.html">DB-&gt;set_errfile</a>,
+<a href="../api_c/db_set_errpfx.html">DB-&gt;set_errpfx</a>,
+<a href="../api_c/db_set_feedback.html">DB-&gt;set_feedback</a>,
+<a href="../api_c/db_set_flags.html">DB-&gt;set_flags</a>,
+<a href="../api_c/db_set_h_ffactor.html">DB-&gt;set_h_ffactor</a>,
+<a href="../api_c/db_set_h_hash.html">DB-&gt;set_h_hash</a>,
+<a href="../api_c/db_set_h_nelem.html">DB-&gt;set_h_nelem</a>,
+<a href="../api_c/db_set_lorder.html">DB-&gt;set_lorder</a>,
+<a href="../api_c/db_set_pagesize.html">DB-&gt;set_pagesize</a>,
+<a href="../api_c/db_set_paniccall.html">DB-&gt;set_paniccall</a>,
+<a href="../api_c/db_set_q_extentsize.html">DB-&gt;set_q_extentsize</a>,
+<a href="../api_c/db_set_re_delim.html">DB-&gt;set_re_delim</a>,
+<a href="../api_c/db_set_re_len.html">DB-&gt;set_re_len</a>,
+<a href="../api_c/db_set_re_pad.html">DB-&gt;set_re_pad</a>,
+<a href="../api_c/db_set_re_source.html">DB-&gt;set_re_source</a>,
+<a href="../api_c/db_stat.html">DB-&gt;stat</a>,
+<a href="../api_c/db_sync.html">DB-&gt;sync</a>,
+<a href="../api_c/db_truncate.html">DB-&gt;truncate</a>,
+<a href="../api_c/db_upgrade.html">DB-&gt;upgrade</a>,
+and
+<a href="../api_c/db_verify.html">DB-&gt;verify</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
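
A minimal C sketch (not part of the patch) of creating a log cursor and walking the log with DB_LOGC->get, which is documented further below. dbenv is assumed to be open with DB_INIT_LOG, and DB_DBT_REALLOC lets the library manage the record buffer.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <db.h>

    /*
     * Sketch: walk the log from the first record to the last, printing
     * each record's LSN and length, then close the cursor.
     */
    int
    dump_log_lsns(DB_ENV *dbenv)
    {
        DBT data;
        DB_LOGC *logc;
        DB_LSN lsn;
        int ret, t_ret;

        if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0) {
            dbenv->err(dbenv, ret, "DB_ENV->log_cursor");
            return (ret);
        }

        memset(&data, 0, sizeof(data));
        data.flags = DB_DBT_REALLOC;            /* library manages buffer */

        while ((ret = logc->get(logc, &lsn, &data, DB_NEXT)) == 0)
            printf("[%lu][%lu] %lu bytes\n", (unsigned long)lsn.file,
                (unsigned long)lsn.offset, (unsigned long)data.size);

        if (ret == DB_NOTFOUND)                 /* ran off the end of the log */
            ret = 0;
        if (data.data != NULL)
            free(data.data);

        if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
            ret = t_ret;
        return (ret);
    }
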
diff --git a/db/docs/api_c/logc_close.html b/db/docs/api_c/logc_close.html
new file mode 100644
index 000000000..638fdcd32
--- /dev/null
+++ b/db/docs/api_c/logc_close.html
@@ -0,0 +1,66 @@
+<!--Id: logc_close.so,v 10.3 2001/10/02 01:33:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOGC-&gt;close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOGC-&gt;close</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_LOGC-&gt;close(DB_LOGC *cursor, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LOGC-&gt;close function discards the log cursor. After DB_LOGC-&gt;close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_LOGC-&gt;close function returns a non-zero error value on failure and 0 on success.
+<h1>Errors</h1>
+<p>The DB_LOGC-&gt;close function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DB_LOGC-&gt;close function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_LOGC-&gt;close function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a>,
+<a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a>,
+<a href="../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a>,
+<a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a>,
+<a href="../api_c/log_file.html">DB_ENV-&gt;log_file</a>,
+<a href="../api_c/log_flush.html">DB_ENV-&gt;log_flush</a>,
+<a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a>,
+<a href="../api_c/log_register.html">DB_ENV-&gt;log_register</a>,
+<a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>,
+<a href="../api_c/log_unregister.html">DB_ENV-&gt;log_unregister</a>,
+<a href="../api_c/logc_close.html">DB_LOGC-&gt;close</a>
+and
+<a href="../api_c/logc_get.html">DB_LOGC-&gt;get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
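
A minimal C sketch (not part of the patch) of the rule stated above: close the cursor on every path and never reuse the handle afterward, regardless of the return of DB_LOGC->close. The single-record DB_SET lookup is an illustrative use.

    #include <db.h>

    /*
     * Sketch: fetch one log record by caller-supplied LSN with DB_SET,
     * then close the cursor whether or not the get succeeded.
     */
    int
    fetch_log_record(DB_ENV *dbenv, DB_LSN *lsn, DBT *data)
    {
        DB_LOGC *logc;
        int ret, t_ret;

        if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
            return (ret);

        ret = logc->get(logc, lsn, data, DB_SET);

        if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
            ret = t_ret;
        return (ret);
    }
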
diff --git a/db/docs/api_c/logc_get.html b/db/docs/api_c/logc_get.html
new file mode 100644
index 000000000..2292e377a
--- /dev/null
+++ b/db/docs/api_c/logc_get.html
@@ -0,0 +1,109 @@
+<!--Id: logc_get.so,v 10.30 2001/09/29 15:48:08 dda Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_LOGC-&gt;get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_LOGC-&gt;get</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_LOGC-&gt;get(DB_LOGC *logc, DB_LSN *lsn, DBT *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_LOGC-&gt;get function retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_c/dbt.html">DBT</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_c/dbt.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_c/dbt.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> and <a href="../api_c/dbt.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_c/dbt.html">DBT</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a> function is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the DB_LOGC-&gt;get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the log is empty, the DB_LOGC-&gt;get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the
+record returned.
+<p>If the log is empty, the DB_LOGC-&gt;get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_c/db_lsn.html">DB_LSN</a> of the record
+returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DB_LOGC-&gt;get will return the first (last) record
+in the log. If the last (first) log record has already been returned
+or the log is empty, the DB_LOGC-&gt;get function will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DB_LOGC-&gt;get with the DB_NEXT (DB_PREV) flag set will return
+EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, DB_LOGC-&gt;get will return EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_c/db_lsn.html">DB_LSN</a> is invalid (for example, it does not appear in
+the log) DB_LOGC-&gt;get will return EINVAL.
+</dl>
+<p>Otherwise, the DB_LOGC-&gt;get function returns a non-zero error value on failure and 0 on success.
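+<p>The following fragment is an informal sketch, not part of the interface
+description: it walks the log from beginning to end using the DB_NEXT flag.
+The <b>dbenv</b> handle is assumed to be an already-open environment with
+logging initialized, and the function name <b>walk_log</b> is illustrative
+only.
+<p><pre>
+#include &lt;stdio.h&gt;
+#include &lt;stdlib.h&gt;
+#include &lt;string.h&gt;
+
+#include &lt;db.h&gt;
+
+int
+walk_log(DB_ENV *dbenv)
+{
+    DB_LOGC *logc;
+    DB_LSN lsn;
+    DBT data;
+    int ret, t_ret;
+
+    memset(&amp;data, 0, sizeof(data));
+    data.flags = DB_DBT_REALLOC;    /* Berkeley DB allocates the record buffer. */
+
+    /* Create a log cursor in the environment. */
+    if ((ret = dbenv-&gt;log_cursor(dbenv, &amp;logc, 0)) != 0)
+        return (ret);
+
+    /* An uninitialized cursor with DB_NEXT returns the first log record. */
+    while ((ret = logc-&gt;get(logc, &amp;lsn, &amp;data, DB_NEXT)) == 0)
+        printf("[%lu][%lu]: %lu bytes\n",
+            (unsigned long)lsn.file, (unsigned long)lsn.offset,
+            (unsigned long)data.size);
+    if (ret == DB_NOTFOUND)         /* The end of the log was reached. */
+        ret = 0;
+
+    free(data.data);
+    if ((t_ret = logc-&gt;close(logc, 0)) != 0 &amp;&amp; ret == 0)
+        ret = t_ret;
+    return (ret);
+}
+</pre>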
+<h1>Errors</h1>
+<p>The DB_LOGC-&gt;get function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DB_LOGC-&gt;get function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_LOGC-&gt;get function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_lg_bsize.html">DB_ENV-&gt;set_lg_bsize</a>,
+<a href="../api_c/env_set_lg_dir.html">DB_ENV-&gt;set_lg_dir</a>,
+<a href="../api_c/env_set_lg_max.html">DB_ENV-&gt;set_lg_max</a>,
+<a href="../api_c/env_set_lg_regionmax.html">DB_ENV-&gt;set_lg_regionmax</a>,
+<a href="../api_c/log_archive.html">DB_ENV-&gt;log_archive</a>,
+<a href="../api_c/log_compare.html">log_compare</a>,
+<a href="../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a>,
+<a href="../api_c/log_file.html">DB_ENV-&gt;log_file</a>,
+<a href="../api_c/log_flush.html">DB_ENV-&gt;log_flush</a>,
+<a href="../api_c/log_put.html">DB_ENV-&gt;log_put</a>,
+<a href="../api_c/log_register.html">DB_ENV-&gt;log_register</a>,
+<a href="../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>,
+<a href="../api_c/log_unregister.html">DB_ENV-&gt;log_unregister</a>,
+<a href="../api_c/logc_close.html">DB_LOGC-&gt;close</a>
+and
+<a href="../api_c/logc_get.html">DB_LOGC-&gt;get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_fcreate.html b/db/docs/api_c/memp_fcreate.html
new file mode 100644
index 000000000..b59bb3f80
--- /dev/null
+++ b/db/docs/api_c/memp_fcreate.html
@@ -0,0 +1,65 @@
+<!--Id: memp_fcreate.so,v 10.3 2001/07/31 17:25:40 dda Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_ENV-&gt;memp_fcreate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_ENV-&gt;memp_fcreate</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_ENV-&gt;memp_fcreate(DB_ENV *dbenvp, DB_MPOOLFILE **dbmfp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_ENV-&gt;memp_fcreate function creates a DB_MPOOLFILE structure that
+is the handle for a Berkeley DB shared memory buffer pool file. A pointer to
+this structure is returned in the memory to which <b>dbmfp</b> refers.
+Calling the <a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> function will discard the returned handle.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DB_ENV-&gt;memp_fcreate function returns a non-zero error value on failure and 0 on success.
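+<p>The following is an informal sketch of the handle life cycle. The
+argument ordering shown for <a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> (file name, flags, mode,
+page size), as well as the file name and page size themselves, are
+assumptions for illustration; see that page for the authoritative
+interface.
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+open_pool_file(DB_ENV *dbenv, DB_MPOOLFILE **mpfp)
+{
+    DB_MPOOLFILE *mpf;
+    int ret;
+
+    /* Create the DB_MPOOLFILE handle. */
+    if ((ret = dbenv-&gt;memp_fcreate(dbenv, &amp;mpf, 0)) != 0)
+        return (ret);
+
+    /* Configure the handle here using the DB_MPOOLFILE-&gt;set_* functions. */
+
+    /* Open the underlying file; the name and 8KB page size are illustrative. */
+    if ((ret = mpf-&gt;open(mpf, "my.pool", 0, 0, 8 * 1024)) != 0) {
+        (void)mpf-&gt;close(mpf, 0);       /* Discard the unusable handle. */
+        return (ret);
+    }
+
+    *mpfp = mpf;
+    return (0);
+}
+</pre>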
+<h1>Errors</h1>
+<p>The DB_ENV-&gt;memp_fcreate function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_ENV-&gt;memp_fcreate function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_set_clear_len.html b/db/docs/api_c/memp_set_clear_len.html
new file mode 100644
index 000000000..ebc791497
--- /dev/null
+++ b/db/docs/api_c/memp_set_clear_len.html
@@ -0,0 +1,65 @@
+<!--Id: memp_set_clear_len.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_clear_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_clear_len</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_clear_len(DB_MPOOLFILE *mpf, u_int32_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_clear_len function sets the number of initial bytes in a
+page that should be set to nul bytes when the page is created as a result of
+the <a href="../api_c/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a> or <a href="../api_c/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a> flags being
+specified to <a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>. If no value is specified, or <b>len</b>
+is 0, the entire page is cleared.
+<p>The DB_MPOOLFILE-&gt;set_clear_len function returns a non-zero error value on failure and 0 on success.
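+<p>For example, the following sketch clears only a hypothetical 32-byte
+application page header when a page is created; <b>dbenv</b> and <b>mpf</b>
+are assumed to be handles the application has already created.
+<p><pre>
+#include &lt;db.h&gt;
+
+/* Hypothetical size of the application's page header. */
+#define APP_PAGE_HDR_LEN    32
+
+int
+configure_clear_len(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* Only the first APP_PAGE_HDR_LEN bytes of a new page are zeroed. */
+    if ((ret = mpf-&gt;set_clear_len(mpf, APP_PAGE_HDR_LEN)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_clear_len");
+    return (ret);
+}
+</pre>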
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_clear_len function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_clear_len function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_set_fileid.html b/db/docs/api_c/memp_set_fileid.html
new file mode 100644
index 000000000..61d4fa195
--- /dev/null
+++ b/db/docs/api_c/memp_set_fileid.html
@@ -0,0 +1,86 @@
+<!--Id: memp_set_fileid.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_fileid</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_fileid</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_fileid(DB_MPOOLFILE *mpf, u_int8_t *fileid);
+</pre></h3>
+<h1>Description</h1>
+<p>The shared memory buffer pool functions must be able to uniquely
+identify files in order that multiple processes wanting to share a file
+will correctly identify it in the pool. The DB_MPOOLFILE-&gt;set_fileid function
+specifies a unique identifier for the file. Unique file identifiers
+must be a DB_FILE_ID_LEN length array of bytes.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to
+be set, and the memory pool functions will use the file's device and
+inode numbers for this purpose. On Windows systems, the memory pool
+functions use the values returned by GetFileInformationByHandle() by
+default -- these values are known to be constant between processes and
+over reboot in the case of NTFS (in which they are the NTFS MFT
+indices).
+<p>On other filesystems (for example, FAT or NFS), these default values
+are not necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, in which the pool contains
+pages from files stored on such filesystems, must specify a unique file
+identifier using the DB_MPOOLFILE-&gt;set_fileid function, and each process opening
+the file must provide the same unique identifier.</b>
+<p>This call should not be necessary for most applications. Specifically,
+it is not necessary if the memory pool is not shared between processes
+and is reinstantiated after each system reboot, if the application is
+using the Berkeley DB access methods instead of calling the pool functions
+explicitly, or if the files in the memory pool are stored on filesystems
+in which the default values as described previously are invariant
+between processes and across system reboots.
+<p>The DB_MPOOLFILE-&gt;set_fileid function returns a non-zero error value on failure and 0 on success.
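+<p>The following sketch supplies an application-managed identifier; the way
+the bytes are derived here (from a fixed application string) is purely
+illustrative -- any scheme that is unique and stable across processes and
+system reboots will do.
+<p><pre>
+#include &lt;string.h&gt;
+
+#include &lt;db.h&gt;
+
+int
+set_pool_fileid(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    u_int8_t fileid[DB_FILE_ID_LEN];
+    int ret;
+
+    /* Build the identifier; unused trailing bytes are zeroed. */
+    memset(fileid, 0, sizeof(fileid));
+    memcpy(fileid, "my-app:pool.001", sizeof("my-app:pool.001") - 1);
+
+    if ((ret = mpf-&gt;set_fileid(mpf, fileid)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_fileid");
+    return (ret);
+}
+</pre>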
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_fileid function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_fileid function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_set_ftype.html b/db/docs/api_c/memp_set_ftype.html
new file mode 100644
index 000000000..ff6407b35
--- /dev/null
+++ b/db/docs/api_c/memp_set_ftype.html
@@ -0,0 +1,66 @@
+<!--Id: memp_set_ftype.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_ftype</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_ftype</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_ftype(DB_MPOOLFILE *mpf, int ftype);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_ftype function specifies a file type for the purposes of
+input or output processing of the file's pages as they are read from or
+written to the backing filesystem store. The <b>ftype</b> argument
+must be the same as an <b>ftype</b> argument previously specified to
+the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a> function. (See the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>
+documentation for more information.)
+<p>The DB_MPOOLFILE-&gt;set_ftype function returns a non-zero error value on failure and 0 on success.
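+<p>For example (a sketch only; MY_PAGE_FTYPE is a hypothetical constant the
+application also passed as the <b>ftype</b> argument to
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>):
+<p><pre>
+#include &lt;db.h&gt;
+
+/* Hypothetical file type, matching an earlier DB_ENV-&gt;memp_register call. */
+#define MY_PAGE_FTYPE   1
+
+int
+configure_ftype(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    if ((ret = mpf-&gt;set_ftype(mpf, MY_PAGE_FTYPE)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_ftype");
+    return (ret);
+}
+</pre>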
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_ftype function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_ftype function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_set_lsn_offset.html b/db/docs/api_c/memp_set_lsn_offset.html
new file mode 100644
index 000000000..5a38b8fdf
--- /dev/null
+++ b/db/docs/api_c/memp_set_lsn_offset.html
@@ -0,0 +1,64 @@
+<!--Id: memp_set_lsn_offset.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_lsn_offset</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_lsn_offset</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_lsn_offset(DB_MPOOLFILE *mpf, int32_t lsn_offset);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset function specifies the zero-based byte offset
+of a log sequence number (<a href="../api_c/db_lsn.html">DB_LSN</a>) on the file's pages, for the
+purposes of page-flushing as part of transaction checkpoint. (See the
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a> documentation for more information.)
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset function returns a non-zero error value on failure and 0 on success.
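+<p>For example, assuming a hypothetical page layout that stores the log
+sequence number in the first bytes of each page, the offset is 0:
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+configure_lsn_offset(DB_ENV *dbenv, DB_MPOOLFILE *mpf)
+{
+    int ret;
+
+    /* This layout stores the DB_LSN at byte offset 0 of every page. */
+    if ((ret = mpf-&gt;set_lsn_offset(mpf, 0)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_lsn_offset");
+    return (ret);
+}
+</pre>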
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_lsn_offset function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_lsn_offset function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/memp_set_pgcookie.html b/db/docs/api_c/memp_set_pgcookie.html
new file mode 100644
index 000000000..92a78a902
--- /dev/null
+++ b/db/docs/api_c/memp_set_pgcookie.html
@@ -0,0 +1,65 @@
+<!--Id: memp_set_pgcookie.so,v 10.1 2001/07/24 18:31:15 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_MPOOLFILE-&gt;set_pgcookie</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_MPOOLFILE-&gt;set_pgcookie</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_MPOOLFILE-&gt;set_pgcookie(DB_MPOOLFILE *mpf, DBT *pgcookie);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_MPOOLFILE-&gt;set_pgcookie function specifies a byte string that is provided
+to the functions registered to do input or output processing of the
+file's pages as they are read from or written to the backing filesystem
+store. (See the <a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a> documentation for more
+information.)
+<p>The DB_MPOOLFILE-&gt;set_pgcookie function returns a non-zero error value on failure and 0 on success.
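+<p>The following sketch passes the application's page size through to its
+registered pgin/pgout functions; the <b>my_pginfo</b> structure and the 8KB
+page size are hypothetical.
+<p><pre>
+#include &lt;string.h&gt;
+
+#include &lt;db.h&gt;
+
+struct my_pginfo {          /* Hypothetical cookie contents. */
+    size_t pagesize;
+};
+
+int
+configure_pgcookie(DB_ENV *dbenv, DB_MPOOLFILE *mpf, struct my_pginfo *pginfo)
+{
+    DBT pgcookie;
+    int ret;
+
+    pginfo-&gt;pagesize = 8 * 1024;
+
+    /* The DBT describes the byte string handed to pgin/pgout. */
+    memset(&amp;pgcookie, 0, sizeof(pgcookie));
+    pgcookie.data = pginfo;
+    pgcookie.size = sizeof(*pginfo);
+
+    if ((ret = mpf-&gt;set_pgcookie(mpf, &amp;pgcookie)) != 0)
+        dbenv-&gt;err(dbenv, ret, "DB_MPOOLFILE-&gt;set_pgcookie");
+    return (ret);
+}
+</pre>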
+<h1>Errors</h1>
+<p>The DB_MPOOLFILE-&gt;set_pgcookie function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_MPOOLFILE-&gt;set_pgcookie function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_cachesize.html">DB_ENV-&gt;set_cachesize</a>,
+<a href="../api_c/env_set_mp_mmapsize.html">DB_ENV-&gt;set_mp_mmapsize</a>,
+<a href="../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a>,
+<a href="../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a>,
+<a href="../api_c/memp_fget.html">DB_MPOOLFILE-&gt;get</a>,
+<a href="../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a>,
+<a href="../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a>,
+<a href="../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a>,
+<a href="../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a>,
+<a href="../api_c/memp_register.html">DB_ENV-&gt;memp_register</a>,
+<a href="../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a>,
+<a href="../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a>,
+<a href="../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a>,
+<a href="../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a>,
+<a href="../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a>,
+<a href="../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>,
+<a href="../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a>,
+and
+<a href="../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_c/txn_set_timeout.html b/db/docs/api_c/txn_set_timeout.html
new file mode 100644
index 000000000..a53a290bd
--- /dev/null
+++ b/db/docs/api_c/txn_set_timeout.html
@@ -0,0 +1,80 @@
+<!--Id: txn_set_timeout.so,v 10.2 2001/09/07 21:43:17 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DB_TXN-&gt;set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DB_TXN-&gt;set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db.h&gt;
+<p>
+int
+DB_TXN-&gt;set_timeout(DB_TXN *tid, db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DB_TXN-&gt;set_timeout function sets the lock or transaction timeout
+value for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a> for more information.
+<p>The DB_TXN-&gt;set_timeout function returns a non-zero error value on failure and 0 on success.
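+<p>For example, the following sketch begins a transaction and gives its lock
+requests a five-second (5,000,000 microsecond) timeout; the function name and
+error handling are illustrative only.
+<p><pre>
+#include &lt;db.h&gt;
+
+int
+begin_txn_with_lock_timeout(DB_ENV *dbenv, DB_TXN **tidp)
+{
+    DB_TXN *tid;
+    int ret;
+
+    if ((ret = dbenv-&gt;txn_begin(dbenv, NULL, &amp;tid, 0)) != 0)
+        return (ret);
+
+    /* Lock requests made on behalf of this transaction time out after ~5 seconds. */
+    if ((ret = tid-&gt;set_timeout(tid, 5000000, DB_SET_LOCK_TIMEOUT)) != 0) {
+        (void)tid-&gt;abort(tid);
+        return (ret);
+    }
+
+    *tidp = tid;
+    return (0);
+}
+</pre>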
+<h1>Errors</h1>
+<p>The DB_TXN-&gt;set_timeout function may fail and return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DB_TXN-&gt;set_timeout function may fail and return a non-zero error for errors specified for other Berkeley DB and C library or system functions.
+If a catastrophic error has occurred, the DB_TXN-&gt;set_timeout function may fail and return
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>See Also</h1>
+<a href="../api_c/env_set_tx_max.html">DB_ENV-&gt;set_tx_max</a>,
+<a href="../api_c/env_set_tx_recover.html">DB_ENV-&gt;set_tx_recover</a>,
+<a href="../api_c/env_set_tx_timestamp.html">DB_ENV-&gt;set_tx_timestamp</a>,
+<a href="../api_c/txn_abort.html">DB_TXN-&gt;abort</a>,
+<a href="../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a>,
+<a href="../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a>,
+<a href="../api_c/txn_commit.html">DB_TXN-&gt;commit</a>,
+<a href="../api_c/txn_discard.html">DB_TXN-&gt;discard</a>,
+<a href="../api_c/txn_id.html">DB_TXN-&gt;id</a>,
+<a href="../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a>,
+<a href="../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a>,
+<a href="../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a>
+and
+<a href="../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_c/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_cxx/c_index.html b/db/docs/api_cxx/c_index.html
new file mode 100644
index 000000000..8aba44c15
--- /dev/null
+++ b/db/docs/api_cxx/c_index.html
@@ -0,0 +1,166 @@
+<!--Id: c_index.so,v 10.80 2001/10/13 19:56:20 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: C++ Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>C++ Interface</h1>
+<p><table border=1 align=center>
+<tr><th>Section</th><th>Class/Method</th><th>Description</th></tr>
+<tr><td><b>Database Environment</b></td><td><a href="../api_cxx/dbenv_class.html">DbEnv</a></td><td>Create an environment handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_close.html">DbEnv::close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_err.html">DbEnv::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_err.html">DbEnv::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_open.html">DbEnv::open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_remove.html">DbEnv::remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_feedback.html">DbEnv::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_rec_init.html">DbEnv::set_recovery_init</a></td><td>Set recovery initialization callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_rpc_server.html">DbEnv::set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_shm_key.html">DbEnv::set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tas_spins.html">DbEnv::set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tmp_dir.html">DbEnv::set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_strerror.html">DbEnv::strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_version.html">DbEnv::version</a></td><td>Return version information</td></tr>
+<tr><td><b>Database Operations</b></td><td><a href="../api_cxx/db_class.html">Db</a></td><td>Create a database handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_associate.html">Db::associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_close.html">Db::close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_del.html">Db::del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_fd.html">Db::fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get.html">Db::get</a>, <a href="../api_cxx/db_get.html">Db::pget</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_get_type.html">Db::get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_join.html">Db::join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_key_range.html">Db::key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_open.html">Db::open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_put.html">Db::put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_remove.html">Db::remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_rename.html">Db::rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_stat.html">Db::stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_sync.html">Db::sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_truncate.html">Db::truncate</a></td><td>Empty a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_upgrade.html">Db::upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_verify.html">Db::verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><td><b>Database Configuration</b></td><td><a href="../api_cxx/db_err.html">Db::err</a></td><td>Error message with error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_err.html">Db::errx</a></td><td>Error message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_alloc.html">Db::set_alloc</a></td><td>Set local space allocation functions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a></td><td>Set error message FILE</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_feedback.html">Db::set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_flags.html">Db::set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a></td><td>Set panic callback</td></tr>
+<tr><td><b>Btree/Recno Configuration</b></td><td><a href="../api_cxx/db_set_append_recno.html">Db::set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><b>Hash Configuration</b></td><td><a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><b>Queue Configuration</b></td><td><a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><b>Database Cursor Operations</b></td><td><a href="../api_cxx/dbc_class.html">Dbc</a></td><td><b>Cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/db_cursor.html">Db::cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_close.html">Dbc::close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_count.html">Dbc::count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_del.html">Dbc::del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_dup.html">Dbc::dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_get.html">Dbc::get</a>, <a href="../api_cxx/dbc_get.html">Dbc::pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/dbc_put.html">Dbc::put</a></td><td>Store by cursor</td></tr>
+<tr><td><b>Key/Data Pairs</b></td><td><a href="../api_cxx/dbt_class.html">Dbt</a></td><td><br></td></tr>
+<tr><td><b>Bulk Retrieval</b></td><td><a href="../api_cxx/dbt_bulk.html#DB_MULTIPLE_INIT">DB_MULTIPLE_INIT</a></td><td><br></td></tr>
+<tr><td><b>Lock Subsystem</b></td><td><a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_get.html">DbEnv::lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_id.html">DbEnv::lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_put.html">DbEnv::lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a></td><td>Acquire/release locks</td></tr>
+<tr><td><b>Log Subsystem</b></td><td><a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_archive.html">DbEnv::log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_file.html">DbEnv::log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_flush.html">DbEnv::log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_put.html">DbEnv::log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_register.html">DbEnv::log_register</a></td><td>Register a filename with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_stat.html">DbEnv::log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a></td><td>Unregister a filename with the log manager</td></tr>
+<tr><td><b>Log Cursor Operations</b></td><td><a href="../api_cxx/logc_class.html">DbLogc</a></td><td><b>Log cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/logc_close.html">DbLogc::logc_close</a></td><td>Close a log cursor</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/logc_get.html">DbLogc::logc_get</a></td><td>Retrieve a log record</td></tr>
+<tr><td><b>Log Sequence Numbers</b></td><td><a href="../api_cxx/lsn_class.html">DbLsn</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/log_compare.html">DbEnv::log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><b>Memory Pool Subsystem</b></td><td><a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_register.html">DbEnv::memp_register</a></td><td>Register input/output functions for a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a></td><td>Flush pages from a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><b>Memory Pool Files</b></td><td><a href="../api_cxx/mempfile_class.html">DbMpoolFile</a></td><td><b>Memory Pool File class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a></td><td>Close a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a></td><td>Get page from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a></td><td>Open a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a></td><td>Return a page to a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a></td><td>Set memory pool page status</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a></td><td>Flush pages from a file in a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a></td><td>Set file page bytes to be cleared</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a></td><td>Set file unique identifier</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a></td><td>Set file type</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a></td><td>Set file log-sequence-number offset</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a></td><td>Set file cookie for pgin/pgout</td></tr>
+<tr><td><b>Transaction Subsystem</b></td><td><a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a></td><td>Set transaction abort recover function</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><b>Transactions</b></td><td><a href="../api_cxx/txn_class.html">DbTxn</a></td><td><b>Transaction class</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_abort.html">DbTxn::abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_commit.html">DbTxn::commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_discard.html">DbTxn::discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_id.html">DbTxn::id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a></td><td>Set transaction timeout</td></tr>
+<tr><td><b>Replication</b></td><td><a href="../api_cxx/rep_transport.html">DbEnv::set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_elect.html">DbEnv::rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_message.html">DbEnv::rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/rep_start.html">DbEnv::rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><b>Exceptions</b></td><td><a href="../api_cxx/except_class.html">DbException</a></td><td><b>Exception Class for Berkeley DB Activity</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/get_errno.html">DbException::get_errno</a></td><td>Get the error value</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/what.html">DbException::what</a></td><td>Get the error string</td></tr>
+<tr><td><br></td><td><a href="../api_cxx/mem_class.html">DbMemoryException</a></td><td><b>Exception Class for insufficient memory</b></td></tr>
+<tr><td><br></td><td><a href="../api_cxx/get_dbt.html">DbMemoryException::get_dbt</a></td><td>Get the failing Dbt</td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_cxx/env_set_timeout.html b/db/docs/api_cxx/env_set_timeout.html
new file mode 100644
index 000000000..a267862f6
--- /dev/null
+++ b/db/docs/api_cxx/env_set_timeout.html
@@ -0,0 +1,110 @@
+<!--Id: env_set_timeout.so,v 10.2 2001/09/07 21:43:16 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::set_timeout(db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::set_timeout method sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a> and
+<a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a> for more information.
+<p>The DbEnv::set_timeout interface may be used only to configure Berkeley DB before
+the <a href="../api_cxx/env_open.html">DbEnv::open</a> interface is called.
+<p>The DbEnv::set_timeout method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::set_timeout method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_cxx/env_open.html">DbEnv::open</a> was called.
+</dl>
+<p>The DbEnv::set_timeout method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::set_timeout method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_close.html">DbEnv::close</a>,
+<a href="../api_cxx/env_err.html">DbEnv::err</a>, <a href="../api_cxx/env_err.html">DbEnv::errx</a>
+<a href="../api_cxx/env_open.html">DbEnv::open</a>,
+<a href="../api_cxx/env_remove.html">DbEnv::remove</a>,
+<a href="../api_cxx/env_set_alloc.html">DbEnv::set_alloc</a>,
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_data_dir.html">DbEnv::set_data_dir</a>,
+<a href="../api_cxx/env_set_errcall.html">DbEnv::set_errcall</a>,
+<a href="../api_cxx/env_set_errfile.html">DbEnv::set_errfile</a>,
+<a href="../api_cxx/env_set_error_stream.html">DbEnv::set_error_stream</a>,
+<a href="../api_cxx/env_set_errpfx.html">DbEnv::set_errpfx</a>,
+<a href="../api_cxx/env_set_feedback.html">DbEnv::set_feedback</a>,
+<a href="../api_cxx/env_set_flags.html">DbEnv::set_flags</a>,
+<a href="../api_cxx/env_set_paniccall.html">DbEnv::set_paniccall</a>,
+<a href="../api_cxx/env_set_rec_init.html">DbEnv::set_recovery_init</a>,
+<a href="../api_cxx/env_set_rpc_server.html">DbEnv::set_rpc_server</a>,
+<a href="../api_cxx/env_set_shm_key.html">DbEnv::set_shm_key</a>,
+<a href="../api_cxx/env_set_tas_spins.html">DbEnv::set_tas_spins</a>,
+<a href="../api_cxx/env_set_tmp_dir.html">DbEnv::set_tmp_dir</a>,
+<a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a>,
+<a href="../api_cxx/env_set_verbose.html">DbEnv::set_verbose</a>,
+<a href="../api_cxx/env_strerror.html">DbEnv::strerror</a>
+and
+<a href="../api_cxx/env_version.html">DbEnv::version</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
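Purely as an illustration of the calling pattern described in the page above (this is not part of the patch), a minimal C++ sketch follows. The function name, home-directory path, open flags, and timeout values are assumptions; error handling is omitted because the C++ methods throw DbException by default.

```cpp
#include <db_cxx.h>

// Hypothetical helper: configure both timeouts, then open the environment.
int
open_env_with_timeouts(DbEnv *env)      /* 'env' is an unopened DbEnv handle */
{
    /* Timeouts are in microseconds (db_timeout_t), so ~71 minutes maximum. */
    env->set_timeout(1000000, DB_SET_LOCK_TIMEOUT);   /* 1 second for locks */
    env->set_timeout(5000000, DB_SET_TXN_TIMEOUT);    /* 5 seconds for txns */

    /* DbEnv::set_timeout must precede DbEnv::open, or EINVAL results. */
    return (env->open("/path/to/env",                 /* placeholder home */
        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN,
        0));
}
```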
diff --git a/db/docs/api_cxx/lock_id_free.html b/db/docs/api_cxx/lock_id_free.html
new file mode 100644
index 000000000..4366d17e6
--- /dev/null
+++ b/db/docs/api_cxx/lock_id_free.html
@@ -0,0 +1,64 @@
+<!--Id: lock_id_free.so,v 10.1 2001/09/25 15:18:55 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::lock_id_free(u_int32_t id);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::lock_id_free method frees a locker ID allocated by the
+DbEnv::lock_id method.
+<p>The DbEnv::lock_id_free method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DbEnv::lock_id_free method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::lock_id_free method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lk_conflicts.html">DbEnv::set_lk_conflicts</a>,
+<a href="../api_cxx/env_set_lk_detect.html">DbEnv::set_lk_detect</a>,
+<a href="../api_cxx/env_set_lk_max_lockers.html">DbEnv::set_lk_max_lockers</a>,
+<a href="../api_cxx/env_set_lk_max_locks.html">DbEnv::set_lk_max_locks</a>,
+<a href="../api_cxx/env_set_lk_max_objects.html">DbEnv::set_lk_max_objects</a>,
+<a href="../api_cxx/lock_detect.html">DbEnv::lock_detect</a>,
+<a href="../api_cxx/lock_get.html">DbEnv::lock_get</a>,
+<a href="../api_cxx/lock_id.html">DbEnv::lock_id</a>,
+<a href="../api_cxx/lock_id_free.html">DbEnv::lock_id_free</a>,
+<a href="../api_cxx/lock_put.html">DbEnv::lock_put</a>,
+<a href="../api_cxx/lock_stat.html">DbEnv::lock_stat</a>,
+and
+<a href="../api_cxx/lock_vec.html">DbEnv::lock_vec</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
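For illustration only (not part of the patch), a minimal sketch of the allocate/free pairing this page describes. It assumes an environment already opened with DB_INIT_LOCK, and the function name is hypothetical; error handling is omitted.

```cpp
#include <db_cxx.h>

// Assumes 'env' was opened with DB_INIT_LOCK; error handling is omitted.
void
locker_lifecycle(DbEnv *env)
{
    u_int32_t locker;

    env->lock_id(&locker);          /* allocate a locker ID */

    /* ... acquire and release locks via DbEnv::lock_get / DbEnv::lock_vec ... */

    /* Every lock held by the ID must already have been released, or this
     * returns (or throws) EINVAL as described above. */
    env->lock_id_free(locker);
}
```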
diff --git a/db/docs/api_cxx/log_cursor.html b/db/docs/api_cxx/log_cursor.html
new file mode 100644
index 000000000..e8463b4e0
--- /dev/null
+++ b/db/docs/api_cxx/log_cursor.html
@@ -0,0 +1,98 @@
+<!--Id: log_cursor.so,v 10.1 2001/09/28 15:09:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::log_cursor(DbLogc **cursorp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::log_cursor method
+creates a log cursor and copies a pointer to it into the memory to which
+<b>cursorp</b> refers.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv::log_cursor method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::log_cursor method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv::log_cursor method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::log_cursor method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_cxx/db_associate.html">Db::associate</a>,
+<a href="../api_cxx/db_close.html">Db::close</a>,
+<a href="../api_cxx/db_cursor.html">Db::cursor</a>,
+<a href="../api_cxx/db_del.html">Db::del</a>,
+<a href="../api_cxx/db_err.html">Db::err</a>, <a href="../api_cxx/db_err.html">Db::errx</a>
+<a href="../api_cxx/db_fd.html">Db::fd</a>,
+<a href="../api_cxx/db_get.html">Db::get</a>,
+<a href="../api_cxx/db_get.html">Db::pget</a>,
+<a href="../api_cxx/db_get_byteswapped.html">Db::get_byteswapped</a>,
+<a href="../api_cxx/db_get_type.html">Db::get_type</a>,
+<a href="../api_cxx/db_join.html">Db::join</a>,
+<a href="../api_cxx/db_key_range.html">Db::key_range</a>,
+<a href="../api_cxx/db_open.html">Db::open</a>,
+<a href="../api_cxx/db_put.html">Db::put</a>,
+<a href="../api_cxx/db_remove.html">Db::remove</a>,
+<a href="../api_cxx/db_rename.html">Db::rename</a>,
+<a href="../api_cxx/db_set_alloc.html">Db::set_alloc</a>,
+<a href="../api_cxx/db_set_append_recno.html">Db::set_append_recno</a>,
+<a href="../api_cxx/db_set_bt_compare.html">Db::set_bt_compare</a>,
+<a href="../api_cxx/db_set_bt_minkey.html">Db::set_bt_minkey</a>,
+<a href="../api_cxx/db_set_bt_prefix.html">Db::set_bt_prefix</a>,
+<a href="../api_cxx/db_set_cachesize.html">Db::set_cachesize</a>,
+<a href="../api_cxx/db_set_dup_compare.html">Db::set_dup_compare</a>,
+<a href="../api_cxx/db_set_errcall.html">Db::set_errcall</a>,
+<a href="../api_cxx/db_set_errfile.html">Db::set_errfile</a>,
+<a href="../api_cxx/db_set_errpfx.html">Db::set_errpfx</a>,
+<a href="../api_cxx/db_set_feedback.html">Db::set_feedback</a>,
+<a href="../api_cxx/db_set_flags.html">Db::set_flags</a>,
+<a href="../api_cxx/db_set_h_ffactor.html">Db::set_h_ffactor</a>,
+<a href="../api_cxx/db_set_h_hash.html">Db::set_h_hash</a>,
+<a href="../api_cxx/db_set_h_nelem.html">Db::set_h_nelem</a>,
+<a href="../api_cxx/db_set_lorder.html">Db::set_lorder</a>,
+<a href="../api_cxx/db_set_pagesize.html">Db::set_pagesize</a>,
+<a href="../api_cxx/db_set_paniccall.html">Db::set_paniccall</a>,
+<a href="../api_cxx/db_set_q_extentsize.html">Db::set_q_extentsize</a>,
+<a href="../api_cxx/db_set_re_delim.html">Db::set_re_delim</a>,
+<a href="../api_cxx/db_set_re_len.html">Db::set_re_len</a>,
+<a href="../api_cxx/db_set_re_pad.html">Db::set_re_pad</a>,
+<a href="../api_cxx/db_set_re_source.html">Db::set_re_source</a>,
+<a href="../api_cxx/db_stat.html">Db::stat</a>,
+<a href="../api_cxx/db_sync.html">Db::sync</a>,
+<a href="../api_cxx/db_truncate.html">Db::truncate</a>,
+<a href="../api_cxx/db_upgrade.html">Db::upgrade</a>,
+and
+<a href="../api_cxx/db_verify.html">Db::verify</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
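A minimal sketch of the cursor lifecycle described above, added for illustration (not part of the patch). It assumes an environment opened with DB_INIT_LOG and a hypothetical function name; error handling is omitted.

```cpp
#include <db_cxx.h>

// Assumes 'env' was opened with DB_INIT_LOG; error handling is omitted.
void
with_log_cursor(DbEnv *env)
{
    DbLogc *logc;

    env->log_cursor(&logc, 0);      /* the flags parameter must currently be 0 */

    /* ... read log records with DbLogc::get (see logc_get) ... */

    logc->close(0);                 /* discard the cursor when finished */
}
```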
diff --git a/db/docs/api_cxx/logc_class.html b/db/docs/api_cxx/logc_class.html
new file mode 100644
index 000000000..c83c12e95
--- /dev/null
+++ b/db/docs/api_cxx/logc_class.html
@@ -0,0 +1,57 @@
+<!--Id: logc_class.so,v 1.1 2001/09/28 15:09:37 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+class DbLogc { ... };
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbLogc class,
+which provides cursor support for log files.
+<p>The DbLogc functions are the library interface supporting
+sequential access to the records stored in log files. Cursors are
+created by calling the <a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a> method which returns a pointer to a DbLogc object.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>,
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>,
+<a href="../api_cxx/logc_close.html">DbLogc::logc_close</a>
+and
+<a href="../api_cxx/logc_get.html">DbLogc::logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_cxx/logc_close.html b/db/docs/api_cxx/logc_close.html
new file mode 100644
index 000000000..22c8f5052
--- /dev/null
+++ b/db/docs/api_cxx/logc_close.html
@@ -0,0 +1,70 @@
+<!--Id: logc_close.so,v 10.3 2001/10/02 01:33:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc::logc_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc::logc_close</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbLogc::close(u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc::logc_close method discards the log cursor. After DbLogc::logc_close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbLogc::logc_close method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbLogc::logc_close method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DbLogc::logc_close method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc::logc_close method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>,
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>,
+<a href="../api_cxx/logc_close.html">DbLogc::logc_close</a>
+and
+<a href="../api_cxx/logc_get.html">DbLogc::logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_cxx/logc_get.html b/db/docs/api_cxx/logc_get.html
new file mode 100644
index 000000000..bb5e4a3c7
--- /dev/null
+++ b/db/docs/api_cxx/logc_get.html
@@ -0,0 +1,113 @@
+<!--Id: logc_get.so,v 10.30 2001/09/29 15:48:08 dda Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc::logc_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc::logc_get</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbLogc::get(DbLsn *lsn, Dbt *data, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc::logc_get method retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_cxx/dbt_class.html">Dbt</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_cxx/dbt_class.html#DB_DBT_MALLOC">DB_DBT_MALLOC</a>,
+<a href="../api_cxx/dbt_class.html#DB_DBT_REALLOC">DB_DBT_REALLOC</a> and <a href="../api_cxx/dbt_class.html#DB_DBT_USERMEM">DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_cxx/dbt_class.html">Dbt</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_CHECKPOINT">DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a> method is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the DbLogc::logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_FIRST">DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbLogc::logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_LAST">DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbLogc::logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p><dt><a name="DB_NEXT">DB_NEXT</a>, <a name="DB_PREV">DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_cxx/lsn_class.html">DbLsn</a> of the record
+returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbLogc::logc_get will return the first (last) record
+in the log. If the last (first) log record has already been returned
+or the log is empty, the DbLogc::logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbLogc::logc_get with the DB_NEXT (DB_PREV) flag set will return
+EINVAL.
+<p><dt><a name="DB_CURRENT">DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, DbLogc::logc_get will return EINVAL.
+<p><dt><a name="DB_SET">DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_cxx/lsn_class.html">DbLsn</a> is invalid (for example, it does not appear in
+the log) DbLogc::logc_get will return EINVAL.
+</dl>
+<p>Otherwise, the DbLogc::logc_get method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbLogc::logc_get method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DbLogc::logc_get method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc::logc_get method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_lg_bsize.html">DbEnv::set_lg_bsize</a>,
+<a href="../api_cxx/env_set_lg_dir.html">DbEnv::set_lg_dir</a>,
+<a href="../api_cxx/env_set_lg_max.html">DbEnv::set_lg_max</a>,
+<a href="../api_cxx/env_set_lg_regionmax.html">DbEnv::set_lg_regionmax</a>,
+<a href="../api_cxx/log_archive.html">DbEnv::log_archive</a>,
+<a href="../api_cxx/log_compare.html">DbEnv::log_compare</a>,
+<a href="../api_cxx/log_cursor.html">DbEnv::log_cursor</a>,
+<a href="../api_cxx/log_file.html">DbEnv::log_file</a>,
+<a href="../api_cxx/log_flush.html">DbEnv::log_flush</a>,
+<a href="../api_cxx/log_put.html">DbEnv::log_put</a>,
+<a href="../api_cxx/log_register.html">DbEnv::log_register</a>,
+<a href="../api_cxx/log_stat.html">DbEnv::log_stat</a>,
+<a href="../api_cxx/log_unregister.html">DbEnv::log_unregister</a>,
+<a href="../api_cxx/logc_close.html">DbLogc::logc_close</a>
+and
+<a href="../api_cxx/logc_get.html">DbLogc::logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
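For illustration only (not part of the patch), a minimal sketch of the DB_FIRST/DB_NEXT iteration pattern described above. The cursor is assumed to come from DbEnv::log_cursor, the function name is hypothetical, and error handling is omitted.

```cpp
#include <stdlib.h>
#include <db_cxx.h>

// Assumes 'logc' came from DbEnv::log_cursor; error handling is omitted.
void
walk_log(DbLogc *logc)
{
    DbLsn lsn;
    Dbt data;
    int ret;

    data.set_flags(DB_DBT_MALLOC);  /* the library allocates each record buffer */

    for (ret = logc->get(&lsn, &data, DB_FIRST);
        ret == 0;
        ret = logc->get(&lsn, &data, DB_NEXT)) {
        /* data.get_data() / data.get_size() describe the raw record; with
         * DB_DBT_MALLOC the caller owns and frees the buffer. */
        free(data.get_data());
    }
    /* ret is DB_NOTFOUND once the last record has been returned. */
}
```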
diff --git a/db/docs/api_cxx/memp_fcreate.html b/db/docs/api_cxx/memp_fcreate.html
new file mode 100644
index 000000000..014688c6c
--- /dev/null
+++ b/db/docs/api_cxx/memp_fcreate.html
@@ -0,0 +1,69 @@
+<!--Id: memp_fcreate.so,v 10.3 2001/07/31 17:25:40 dda Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv::memp_fcreate</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv::memp_fcreate</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbEnv::memp_fcreate(DbMpoolFile **dbmfp, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv::memp_fcreate method creates a DbMpoolFile structure that
+is the handle for a Berkeley DB shared memory buffer pool file. A pointer to
+this structure is returned in the memory to which <b>dbmfp</b> refers.
+Calling the <a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a> method will discard the returned handle.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv::memp_fcreate method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbEnv::memp_fcreate method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv::memp_fcreate method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
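A minimal sketch, for illustration only (not part of the patch), of creating a pool file handle as described above. The environment is assumed to have been opened with DB_INIT_MPOOL and the function name is hypothetical; error handling is omitted.

```cpp
#include <db_cxx.h>

// Assumes 'env' was opened with DB_INIT_MPOOL; error handling is omitted.
void
create_mpool_handle(DbEnv *env)
{
    DbMpoolFile *mfp;

    env->memp_fcreate(&mfp, 0);     /* the flags parameter must currently be 0 */

    /* The handle is then tuned with the DbMpoolFile::set_* methods documented
     * on the following pages, opened with DbMpoolFile::open, and eventually
     * discarded with DbMpoolFile::close. */
}
```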
diff --git a/db/docs/api_cxx/memp_set_clear_len.html b/db/docs/api_cxx/memp_set_clear_len.html
new file mode 100644
index 000000000..0574fff1c
--- /dev/null
+++ b/db/docs/api_cxx/memp_set_clear_len.html
@@ -0,0 +1,69 @@
+<!--Id: memp_set_clear_len.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_clear_len</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_clear_len</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_clear_len(u_int32_t len);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_clear_len method sets the number of initial bytes in a
+page that should be set to nul when the page is created as a result of
+the <a href="../api_cxx/memp_fget.html#DB_MPOOL_CREATE">DB_MPOOL_CREATE</a> or <a href="../api_cxx/memp_fget.html#DB_MPOOL_NEW">DB_MPOOL_NEW</a> flags being
+specified to <a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>. If no value is specified, or <b>len</b>
+is 0, the entire page is cleared.
+<p>The DbMpoolFile::set_clear_len method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_clear_len method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_clear_len method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
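A one-line sketch of the configuration described above, added for illustration (not part of the patch). The 32-byte header length is a hypothetical application page layout.

```cpp
#include <db_cxx.h>

// Hypothetical page layout: only the first 32 header bytes need to be nul'd
// when DB_MPOOL_CREATE or DB_MPOOL_NEW creates a page.
void
limit_page_clearing(DbMpoolFile *mfp)
{
    mfp->set_clear_len(32);         /* a len of 0 (the default) clears the whole page */
}
```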
diff --git a/db/docs/api_cxx/memp_set_fileid.html b/db/docs/api_cxx/memp_set_fileid.html
new file mode 100644
index 000000000..38d3a889b
--- /dev/null
+++ b/db/docs/api_cxx/memp_set_fileid.html
@@ -0,0 +1,90 @@
+<!--Id: memp_set_fileid.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_fileid</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_fileid</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_fileid(u_int8_t *fileid);
+</pre></h3>
+<h1>Description</h1>
+<p>The shared memory buffer pool functions must be able to uniquely
+identify files in order that multiple processes wanting to share a file
+will correctly identify it in the pool. The DbMpoolFile::set_fileid method
+specifies a unique identifier for the file. Unique file identifiers
+must be a DB_FILE_ID_LEN length array of bytes.
+<p>On most UNIX/POSIX systems, the <b>fileid</b> field will not need to
+be set, and the memory pool functions will use the file's device and
+inode numbers for this purpose. On Windows systems, the memory pool
+functions use the values returned by GetFileInformationByHandle() by
+default -- these values are known to be constant between processes and
+over reboot in the case of NTFS (in which they are the NTFS MFT
+indices).
+<p>On other filesystems (for example, FAT or NFS), these default values
+are not necessarily unique between processes or across system reboots.
+<b>Applications wanting to maintain a shared memory buffer pool
+between processes or across system reboots, in which the pool contains
+pages from files stored on such filesystems, must specify a unique file
+identifier using the DbMpoolFile::set_fileid method, and each process opening
+the file must provide the same unique identifier.</b>
+<p>This call should not be necessary for most applications. Specifically,
+it is not necessary if the memory pool is not shared between processes
+and is reinstantiated after each system reboot, if the application is
+using the Berkeley DB access methods instead of calling the pool functions
+explicitly, or if the files in the memory pool are stored on filesystems
+in which the default values as described previously are invariant
+between processes and across system reboots.
+<p>The DbMpoolFile::set_fileid method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_fileid method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_fileid method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
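For illustration only (not part of the patch), a minimal sketch of supplying a stable file identifier as described above. The scheme of padding an application-chosen tag into the DB_FILE_ID_LEN byte array is an assumption; any derivation works as long as every cooperating process produces the same bytes.

```cpp
#include <string.h>
#include <db_cxx.h>

// Hypothetical ID scheme: every process derives the same DB_FILE_ID_LEN
// bytes for the file (here, a fixed tag padded with zeroes).
void
set_stable_fileid(DbMpoolFile *mfp, const char *tag)
{
    u_int8_t fileid[DB_FILE_ID_LEN];

    memset(fileid, 0, sizeof(fileid));
    strncpy((char *)fileid, tag, sizeof(fileid));   /* application-chosen bytes */

    mfp->set_fileid(fileid);
}
```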
diff --git a/db/docs/api_cxx/memp_set_ftype.html b/db/docs/api_cxx/memp_set_ftype.html
new file mode 100644
index 000000000..d87576141
--- /dev/null
+++ b/db/docs/api_cxx/memp_set_ftype.html
@@ -0,0 +1,70 @@
+<!--Id: memp_set_ftype.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_ftype</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_ftype</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_ftype(int ftype);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_ftype method specifies a file type for the purposes of
+input or output processing of the file's pages as they are read from, or
+written to, the backing filesystem store. The <b>ftype</b> argument
+must be the same as a <b>ftype</b> argument previously specified to
+the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> method. (See the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>
+documentation for more information.)
+<p>The DbMpoolFile::set_ftype method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_ftype method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_ftype method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_cxx/memp_set_lsn_offset.html b/db/docs/api_cxx/memp_set_lsn_offset.html
new file mode 100644
index 000000000..5173fbb15
--- /dev/null
+++ b/db/docs/api_cxx/memp_set_lsn_offset.html
@@ -0,0 +1,68 @@
+<!--Id: memp_set_lsn_offset.so,v 10.1 2001/07/24 18:31:14 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_lsn_offset</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_lsn_offset</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_lsn_offset(int32_t lsn_offset);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_lsn_offset method specifies the zero-based byte offset
+of a log sequence number (<a href="../api_cxx/lsn_class.html">DbLsn</a>) on the file's pages, for the
+purposes of page-flushing as part of transaction checkpoint. (See the
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a> documentation for more information.)
+<p>The DbMpoolFile::set_lsn_offset method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_lsn_offset method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_lsn_offset method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
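A one-line sketch for illustration (not part of the patch). The offset of 0 assumes the application's page format keeps the DbLsn at the start of the page, the same layout standard Berkeley DB pages use.

```cpp
#include <db_cxx.h>

// Assumption: the page format stores the DbLsn at byte offset 0.
void
declare_lsn_location(DbMpoolFile *mfp)
{
    mfp->set_lsn_offset(0);
}
```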
diff --git a/db/docs/api_cxx/memp_set_pgcookie.html b/db/docs/api_cxx/memp_set_pgcookie.html
new file mode 100644
index 000000000..1ed2548de
--- /dev/null
+++ b/db/docs/api_cxx/memp_set_pgcookie.html
@@ -0,0 +1,69 @@
+<!--Id: memp_set_pgcookie.so,v 10.1 2001/07/24 18:31:15 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbMpoolFile::set_pgcookie</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbMpoolFile::set_pgcookie</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbMpoolFile::set_pgcookie(Dbt *pgcookie);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbMpoolFile::set_pgcookie method specifies a byte string that is provided
+to the functions registered to do input or output processing of the
+file's pages as they are read from, or written to, the backing filesystem
+store. (See the <a href="../api_cxx/memp_register.html">DbEnv::memp_register</a> documentation for more
+information.)
+<p>The DbMpoolFile::set_pgcookie method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbMpoolFile::set_pgcookie method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbMpoolFile::set_pgcookie method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/mempfile_class.html">DbMpoolFile</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_cachesize.html">DbEnv::set_cachesize</a>,
+<a href="../api_cxx/env_set_mp_mmapsize.html">DbEnv::set_mp_mmapsize</a>,
+<a href="../api_cxx/memp_fcreate.html">DbEnv::memp_fcreate</a>,
+<a href="../api_cxx/memp_fclose.html">DbMpoolFile::close</a>,
+<a href="../api_cxx/memp_fget.html">DbMpoolFile::get</a>,
+<a href="../api_cxx/memp_fopen.html">DbMpoolFile::open</a>,
+<a href="../api_cxx/memp_fput.html">DbMpoolFile::put</a>,
+<a href="../api_cxx/memp_fset.html">DbMpoolFile::set</a>,
+<a href="../api_cxx/memp_fsync.html">DbMpoolFile::sync</a>,
+<a href="../api_cxx/memp_register.html">DbEnv::memp_register</a>,
+<a href="../api_cxx/memp_set_clear_len.html">DbMpoolFile::set_clear_len</a>,
+<a href="../api_cxx/memp_set_fileid.html">DbMpoolFile::set_fileid</a>,
+<a href="../api_cxx/memp_set_ftype.html">DbMpoolFile::set_ftype</a>,
+<a href="../api_cxx/memp_set_lsn_offset.html">DbMpoolFile::set_lsn_offset</a>,
+<a href="../api_cxx/memp_set_pgcookie.html">DbMpoolFile::set_pgcookie</a>,
+<a href="../api_cxx/memp_stat.html">DbEnv::memp_stat</a>,
+<a href="../api_cxx/memp_sync.html">DbEnv::memp_sync</a>,
+and
+<a href="../api_cxx/memp_trickle.html">DbEnv::memp_trickle</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
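For illustration only (not part of the patch), a minimal sketch of attaching a per-file cookie as described above. The version-string cookie and function name are assumptions; the bytes are simply handed back to the pgin/pgout callbacks registered with DbEnv::memp_register.

```cpp
#include <db_cxx.h>

// Hypothetical cookie: a format-version string delivered to the pgin/pgout
// callbacks registered with DbEnv::memp_register.
void
attach_page_cookie(DbMpoolFile *mfp)
{
    static char version[] = "my-format-v1";
    Dbt cookie;

    cookie.set_data(version);
    cookie.set_size(sizeof(version));

    mfp->set_pgcookie(&cookie);
}
```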
diff --git a/db/docs/api_cxx/txn_set_timeout.html b/db/docs/api_cxx/txn_set_timeout.html
new file mode 100644
index 000000000..da3e6ea3d
--- /dev/null
+++ b/db/docs/api_cxx/txn_set_timeout.html
@@ -0,0 +1,84 @@
+<!--Id: txn_set_timeout.so,v 10.2 2001/09/07 21:43:17 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn::set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn::set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+#include &lt;db_cxx.h&gt;
+<p>
+int
+DbTxn::set_timeout(db_timeout_t timeout, u_int32_t flags);
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn::set_timeout method sets timeout values for locks or
+transactions for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="DB_SET_LOCK_TIMEOUT">DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="DB_SET_TXN_TIMEOUT">DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_cxx/env_set_timeout.html">DbEnv::set_timeout</a> and for more information.
+<p>The DbTxn::set_timeout method either returns a non-zero error value or throws an exception that
+encapsulates a non-zero error value on failure, and returns 0 on success.
+<h1>Errors</h1>
+<p>The DbTxn::set_timeout method may fail and throw an exception or return a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbTxn::set_timeout method may fail and throw an exception or return a non-zero error for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn::set_timeout method may fail and either
+return <a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a> or throw an exception encapsulating
+<a href="../ref/program/errorret.html#DB_RUNRECOVERY">DB_RUNRECOVERY</a>, in which case all subsequent Berkeley DB calls will fail
+in the same way.
+<h1>Class</h1>
+<a href="../api_cxx/dbenv_class.html">DbEnv</a>, <a href="../api_cxx/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_cxx/env_set_tx_max.html">DbEnv::set_tx_max</a>,
+<a href="../api_cxx/env_set_tx_recover.html">DbEnv::set_tx_recover</a>,
+<a href="../api_cxx/env_set_tx_timestamp.html">DbEnv::set_tx_timestamp</a>,
+<a href="../api_cxx/txn_abort.html">DbTxn::abort</a>,
+<a href="../api_cxx/txn_begin.html">DbEnv::txn_begin</a>,
+<a href="../api_cxx/txn_checkpoint.html">DbEnv::txn_checkpoint</a>,
+<a href="../api_cxx/txn_commit.html">DbTxn::commit</a>,
+<a href="../api_cxx/txn_discard.html">DbTxn::discard</a>,
+<a href="../api_cxx/txn_id.html">DbTxn::id</a>,
+<a href="../api_cxx/txn_prepare.html">DbTxn::prepare</a>,
+<a href="../api_cxx/txn_recover.html">DbEnv::txn_recover</a>,
+<a href="../api_cxx/txn_set_timeout.html">DbTxn::set_timeout</a>
+and
+<a href="../api_cxx/txn_stat.html">DbEnv::txn_stat</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_cxx/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
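A minimal sketch, for illustration only (not part of the patch), of overriding the environment-wide timeouts for a single transaction as described above. The environment is assumed to have been opened with DB_INIT_TXN and DB_INIT_LOCK, the function name and timeout values are assumptions, and error handling is omitted.

```cpp
#include <db_cxx.h>

// Assumes 'env' was opened with DB_INIT_TXN and DB_INIT_LOCK.
void
short_lived_txn(DbEnv *env)
{
    DbTxn *txn;

    env->txn_begin(0, &txn, 0);                      /* no parent transaction */

    /* Override the environment-wide values for this transaction only. */
    txn->set_timeout(500000, DB_SET_LOCK_TIMEOUT);   /* 0.5 second lock timeout */
    txn->set_timeout(2000000, DB_SET_TXN_TIMEOUT);   /* 2 second txn timeout */

    /* ... transactional operations ... */

    txn->commit(0);
}
```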
diff --git a/db/docs/api_java/c_index.html b/db/docs/api_java/c_index.html
new file mode 100644
index 000000000..2647f7042
--- /dev/null
+++ b/db/docs/api_java/c_index.html
@@ -0,0 +1,144 @@
+<!--Id: c_index.so,v 10.80 2001/10/13 19:56:20 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: Java Interface</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<h1 align=center>Java Interface</h1>
+<p><table border=1 align=center>
+<tr><th>Section</th><th>Class/Method</th><th>Description</th></tr>
+<tr><td><b>Database Environment</b></td><td><a href="../api_java/dbenv_class.html">DbEnv</a></td><td>Create an environment handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_close.html">DbEnv.close</a></td><td>Close an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_open.html">DbEnv.open</a></td><td>Open an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_remove.html">DbEnv.remove</a></td><td>Remove an environment</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a></td><td>Set the environment data directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a></td><td>Set error message output stream</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_feedback.html">DbEnv.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_flags.html">DbEnv.set_flags</a></td><td>Environment configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_rec_init.html">DbEnv.set_recovery_init</a></td><td>Set recovery initialization callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_rpc_server.html">DbEnv.set_rpc_server</a></td><td>Establish an RPC server connection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_shm_key.html">DbEnv.set_shm_key</a></td><td>Set system memory shared segment ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tas_spins.html">DbEnv.set_tas_spins</a></td><td>Set the number of test-and-set spins</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tmp_dir.html">DbEnv.set_tmp_dir</a></td><td>Set the environment temporary file directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a></td><td>Set lock and transaction timeout</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a></td><td>Set verbose messages</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_strerror.html">DbEnv.strerror</a></td><td>Error strings</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_version.html">DbEnv.version</a></td><td>Return version information</td></tr>
+<tr><td><b>Database Operations</b></td><td><a href="../api_java/db_class.html">Db</a></td><td>Create a database handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_associate.html">Db.associate</a></td><td>Associate a secondary index</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_close.html">Db.close</a></td><td>Close a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_del.html">Db.del</a></td><td>Delete items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_fd.html">Db.fd</a></td><td>Return a file descriptor from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get.html">Db.get</a>, <a href="../api_java/db_get.html">Db.pget</a></td><td>Get items from a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a></td><td>Return if the underlying database is in host order</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_get_type.html">Db.get_type</a></td><td>Return the database type</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_join.html">Db.join</a></td><td>Perform a database join on cursors</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_key_range.html">Db.key_range</a></td><td>Return estimate of key location</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_open.html">Db.open</a></td><td>Open a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_put.html">Db.put</a></td><td>Store items into a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_remove.html">Db.remove</a></td><td>Remove a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_rename.html">Db.rename</a></td><td>Rename a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_stat.html">Db.stat</a></td><td>Return database statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_sync.html">Db.sync</a></td><td>Flush a database to stable storage</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_truncate.html">Db.truncate</a></td><td>Empty a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_upgrade.html">Db.upgrade</a></td><td>Upgrade a database</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_verify.html">Db.verify</a></td><td>Verify/salvage a database</td></tr>
+<tr><td><b>Database Configuration</b></td><td><a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a></td><td>Set the database cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_dup_compare.html">Db.set_dup_compare</a></td><td>Set a duplicate comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errcall.html">Db.set_errcall</a></td><td>Set error message callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a></td><td>Set error message prefix</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_feedback.html">Db.set_feedback</a></td><td>Set feedback callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_flags.html">Db.set_flags</a></td><td>General database configuration</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a></td><td>Set the underlying database page size</td></tr>
+<tr><td><b>Btree/Recno Configuration</b></td><td><a href="../api_java/db_set_append_recno.html">Db.set_append_recno</a></td><td>Set record append callback</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_compare.html">Db.set_bt_compare</a></td><td>Set a Btree comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a></td><td>Set the minimum number of keys per Btree page</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_bt_prefix.html">Db.set_bt_prefix</a></td><td>Set a Btree prefix comparison function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a></td><td>Set the variable-length record delimiter</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_len.html">Db.set_re_len</a></td><td>Set the fixed-length record length</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a></td><td>Set the fixed-length record pad byte</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_re_source.html">Db.set_re_source</a></td><td>Set the backing Recno text file</td></tr>
+<tr><td><b>Hash Configuration</b></td><td><a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a></td><td>Set the Hash table density</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_hash.html">Db.set_h_hash</a></td><td>Set a hashing function</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a></td><td>Set the Hash table size</td></tr>
+<tr><td><br></td><td><a href="../api_java/db_set_lorder.html">Db.set_lorder</a></td><td>Set the database byte order</td></tr>
+<tr><td><b>Queue Configuration</b></td><td><a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a></td><td>Set Queue database extent size</td></tr>
+<tr><td><b>Database Cursor Operations</b></td><td><a href="../api_java/dbc_class.html">Dbc</a></td><td><b>Cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/db_cursor.html">Db.cursor</a></td><td>Create a cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_close.html">Dbc.close</a></td><td>Close a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_count.html">Dbc.count</a></td><td>Return count of duplicates</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_del.html">Dbc.del</a></td><td>Delete by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_dup.html">Dbc.dup</a></td><td>Duplicate a cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_get.html">Dbc.get</a>, <a href="../api_java/dbc_get.html">Dbc.pget</a></td><td>Retrieve by cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/dbc_put.html">Dbc.put</a></td><td>Store by cursor</td></tr>
+<tr><td><b>Key/Data Pairs</b></td><td><a href="../api_java/dbt_class.html">Dbt</a></td><td><br></td></tr>
+<tr><td><b>Bulk Retrieval</b></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleDataIterator</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleKeyDataIterator</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/dbt_bulk_class.html">DbMultipleRecnoDataIterator</a></td><td><br></td></tr>
+<tr><td><b>Lock Subsystem</b></td><td><a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a></td><td>Set lock conflicts matrix</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a></td><td>Set automatic deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a></td><td>Set maximum number of lockers</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a></td><td>Set maximum number of locks</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a></td><td>Set maximum number of lock objects</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_detect.html">DbEnv.lock_detect</a></td><td>Perform deadlock detection</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_get.html">DbEnv.lock_get</a></td><td>Acquire a lock</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_id.html">DbEnv.lock_id</a></td><td>Acquire a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a></td><td>Release a locker ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_put.html">DbEnv.lock_put</a></td><td>Release a lock</td></tr>
+<tr><td><br></td><td><a href="../api_java/lock_stat.html">DbEnv.lock_stat</a></td><td>Return lock subsystem statistics</td></tr>
+<tr><td><b>Log Subsystem</b></td><td><a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a></td><td>Set log buffer size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a></td><td>Set the environment logging directory</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a></td><td>Set log file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a></td><td>Set logging region size</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_archive.html">DbEnv.log_archive</a></td><td>List log and database files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_file.html">DbEnv.log_file</a></td><td>Map Log Sequence Numbers to log files</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_flush.html">DbEnv.log_flush</a></td><td>Flush log records</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_put.html">DbEnv.log_put</a></td><td>Write a log record</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_register.html">DbEnv.log_register</a></td><td>Register a filename with the log manager</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_stat.html">DbEnv.log_stat</a></td><td>Return log subsystem statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/log_unregister.html">DbEnv.log_unregister</a></td><td>Unregister a filename with the log manager</td></tr>
+<tr><td><b>Log Cursor Operations</b></td><td><a href="../api_java/logc_class.html">DbLogc</a></td><td><b>Log cursor class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/log_cursor.html">DbEnv.log_cursor</a></td><td>Create a log cursor handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/logc_close.html">DbLogc.logc_close</a></td><td>Close a log cursor</td></tr>
+<tr><td><br></td><td><a href="../api_java/logc_get.html">DbLogc.logc_get</a></td><td>Retrieve a log record</td></tr>
+<tr><td><b>Log Sequence Numbers</b></td><td><a href="../api_java/lsn_class.html">DbLsn</a></td><td><br></td></tr>
+<tr><td><br></td><td><a href="../api_java/log_compare.html">DbEnv.log_compare</a></td><td>Compare two Log Sequence Numbers</td></tr>
+<tr><td><b>Memory Pool Subsystem</b></td><td><a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a></td><td>Set the environment cache size</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_mp_mmapsize.html">DbEnv.set_mp_mmapsize</a></td><td>Set maximum mapped-in database file size</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_stat.html">DbEnv.memp_stat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_trickle.html">DbEnv.memp_trickle</a></td><td>Trickle flush pages from a memory pool</td></tr>
+<tr><td><br></td><td><a href="../api_java/memp_stat.html">DbEnv.memp_fstat</a></td><td>Return memory pool statistics</td></tr>
+<tr><td><b>Transaction Subsystem</b></td><td><a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a></td><td>Set maximum number of transactions</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_recover.html">DbEnv.set_tx_recover</a></td><td>Set transaction abort recover function</td></tr>
+<tr><td><br></td><td><a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a></td><td>Set recovery timestamp</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a></td><td>Checkpoint the transaction subsystem</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_recover.html">DbEnv.txn_recover</a></td><td>Distributed transaction recovery</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_stat.html">DbEnv.txn_stat</a></td><td>Return transaction subsystem statistics</td></tr>
+<tr><td><b>Transactions</b></td><td><a href="../api_java/txn_class.html">DbTxn</a></td><td><b>Transaction class</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_begin.html">DbEnv.txn_begin</a></td><td>Begin a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_abort.html">DbTxn.abort</a></td><td>Abort a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_commit.html">DbTxn.commit</a></td><td>Commit a transaction</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_discard.html">DbTxn.discard</a></td><td>Discard a prepared but not resolved transaction handle</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_id.html">DbTxn.id</a></td><td>Return a transaction's ID</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_prepare.html">DbTxn.prepare</a></td><td>Prepare a transaction for commit</td></tr>
+<tr><td><br></td><td><a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a></td><td>Set transaction timeout</td></tr>
+<tr><td><b>Replication</b></td><td><a href="../api_java/rep_transport.html">DbEnv.set_rep_transport</a></td><td>Configure replication transport</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_elect.html">DbEnv.rep_elect</a></td><td>Hold a replication election</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_message.html">DbEnv.rep_process_message</a></td><td>Process a replication message</td></tr>
+<tr><td><br></td><td><a href="../api_java/rep_start.html">DbEnv.rep_start</a></td><td>Configure an environment for replication</td></tr>
+<tr><td><b>Exceptions</b></td><td><a href="../api_java/except_class.html">DbException</a></td><td><b>Exception Class for Berkeley DB Activity</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/get_errno.html">DbException.get_errno</a></td><td>Get the error value</td></tr>
+<tr><td><br></td><td><a href="../api_java/deadlock_class.html">DbDeadlockException</a></td><td><b>Exception Class for deadlocks</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/mem_class.html">DbMemoryException</a></td><td><b>Exception Class for insufficient memory</b></td></tr>
+<tr><td><br></td><td><a href="../api_java/get_dbt.html">DbMemoryException.get_dbt</a></td><td>Get the failing Dbt</td></tr>
+<tr><td><br></td><td><a href="../api_java/runrec_class.html">DbRunRecoveryException</a></td><td><b>Exception Class for failures requiring recovery</b></td></tr>
+</table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/env_set_timeout.html b/db/docs/api_java/env_set_timeout.html
new file mode 100644
index 000000000..b7502961a
--- /dev/null
+++ b/db/docs/api_java/env_set_timeout.html
@@ -0,0 +1,104 @@
+<!--Id: env_set_timeout.so,v 10.2 2001/09/07 21:43:16 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void set_timeout(long timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.set_timeout method sets timeout values for locks or
+transactions in the database environment. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this database environment.
+<p>The database environment's lock timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_lock_timeout", one or more whitespace characters,
+and the lock timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time.
+<p><dt><a name="Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for transactions in this database environment.
+<p>The database environment's transaction timeout value may also be set using the environment's
+<b>DB_CONFIG</b> file. The syntax of the entry in that file is a
+single line with the string "set_txn_timeout", one or more whitespace characters,
+and the transaction timeout value. Because the <b>DB_CONFIG</b> file is read when the database
+environment is opened, it will silently overrule configuration done
+before that time. (An example <b>DB_CONFIG</b> fragment follows this list.)
+</dl>
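+<p>For example, a <b>DB_CONFIG</b> file that sets both values (the numbers are
+illustrative only) might contain the following two lines:
+<p><blockquote><pre>set_lock_timeout 1000000
+set_txn_timeout 5000000</pre></blockquote>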
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+Db.DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+Db.DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values specified for the database environment may be overridden
+on a per-lock or per-transaction basis. See <a href="../api_java/lock_vec.html">DbEnv.lock_vec</a> and
+<a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a> for more information.
+<p>The DbEnv.set_timeout interface may be used only to configure Berkeley DB before
+the <a href="../api_java/env_open.html">DbEnv.open</a> interface is called.
+<p>The DbEnv.set_timeout method throws an exception that encapsulates a non-zero error value on
+failure.
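+<p>As an illustration only (the environment creation, pathname, and open flags
+shown here are assumptions for the sketch, not part of this interface; the
+import shown in the synopsis is assumed and exception handling is omitted),
+an application might configure both timeout values before opening the
+environment:
+<p><blockquote><pre>DbEnv dbenv = new DbEnv(0);
+// Timeout values are in microseconds and must be set before DbEnv.open.
+dbenv.set_timeout(1000000L, Db.DB_SET_LOCK_TIMEOUT);  // 1 second for locks
+dbenv.set_timeout(5000000L, Db.DB_SET_TXN_TIMEOUT);   // 5 seconds for transactions
+dbenv.open("/path/to/env",
+    Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+    Db.DB_INIT_MPOOL | Db.DB_INIT_TXN, 0);</pre></blockquote>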
+<h1>Errors</h1>
+<p>The DbEnv.set_timeout method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>Called after <a href="../api_java/env_open.html">DbEnv.open</a> was called.
+</dl>
+<p>The DbEnv.set_timeout method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.set_timeout method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_close.html">DbEnv.close</a>,
+<a href="../api_java/env_version.html">DbEnv.get_version_string</a>,
+<a href="../api_java/env_open.html">DbEnv.open</a>,
+<a href="../api_java/env_remove.html">DbEnv.remove</a>,
+<a href="../api_java/env_set_cachesize.html">DbEnv.set_cachesize</a>,
+<a href="../api_java/env_set_data_dir.html">DbEnv.set_data_dir</a>,
+<a href="../api_java/env_set_errcall.html">DbEnv.set_errcall</a>,
+<a href="../api_java/env_set_error_stream.html">DbEnv.set_error_stream</a>,
+<a href="../api_java/env_set_errpfx.html">DbEnv.set_errpfx</a>,
+<a href="../api_java/env_set_feedback.html">DbEnv.set_feedback</a>,
+<a href="../api_java/env_set_flags.html">DbEnv.set_flags</a>,
+<a href="../api_java/env_set_rec_init.html">DbEnv.set_recovery_init</a>,
+<a href="../api_java/env_set_rpc_server.html">DbEnv.set_rpc_server</a>,
+<a href="../api_java/env_set_shm_key.html">DbEnv.set_shm_key</a>,
+<a href="../api_java/env_set_tas_spins.html">DbEnv.set_tas_spins</a>,
+<a href="../api_java/env_set_tmp_dir.html">DbEnv.set_tmp_dir</a>,
+<a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a>,
+<a href="../api_java/env_set_verbose.html">DbEnv.set_verbose</a>,
+and <a href="../api_java/env_strerror.html">DbEnv.strerror</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/lock_id_free.html b/db/docs/api_java/lock_id_free.html
new file mode 100644
index 000000000..199bfc9ec
--- /dev/null
+++ b/db/docs/api_java/lock_id_free.html
@@ -0,0 +1,62 @@
+<!--Id: lock_id_free.so,v 10.1 2001/09/25 15:18:55 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.lock_id_free</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int lock_id_free(int id)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.lock_id_free method frees a locker ID allocated by the
+DbEnv.lock_id method.
+<p>The DbEnv.lock_id_free method throws an exception that encapsulates a non-zero error value on
+failure.
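+<p>A minimal sketch of the expected pairing (assuming the import shown in the
+synopsis, an already-open environment handle <tt>dbenv</tt>, and that
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a> returns the newly allocated
+locker ID as an int; exception handling is omitted):
+<p><blockquote><pre>int locker = dbenv.lock_id();   // allocate a locker ID
+// ... acquire and release locks on behalf of this locker ID ...
+dbenv.lock_id_free(locker);     // free the ID once it holds no locks</pre></blockquote>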
+<h1>Errors</h1>
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The locker ID is invalid or locks are still held by this locker ID.
+</dl>
+<p>The DbEnv.lock_id_free method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.lock_id_free method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/lock_class.html">DbLock</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lk_conflicts.html">DbEnv.set_lk_conflicts</a>,
+<a href="../api_java/env_set_lk_detect.html">DbEnv.set_lk_detect</a>,
+<a href="../api_java/env_set_lk_max_lockers.html">DbEnv.set_lk_max_lockers</a>,
+<a href="../api_java/env_set_lk_max_locks.html">DbEnv.set_lk_max_locks</a>,
+<a href="../api_java/env_set_lk_max_objects.html">DbEnv.set_lk_max_objects</a>,
+<a href="../api_java/lock_detect.html">DbEnv.lock_detect</a>,
+<a href="../api_java/lock_get.html">DbEnv.lock_get</a>,
+<a href="../api_java/lock_id.html">DbEnv.lock_id</a>,
+<a href="../api_java/lock_id_free.html">DbEnv.lock_id_free</a>,
+<a href="../api_java/lock_put.html">DbEnv.lock_put</a>,
+and
+<a href="../api_java/lock_stat.html">DbEnv.lock_stat</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/log_cursor.html b/db/docs/api_java/log_cursor.html
new file mode 100644
index 000000000..5ca5cf65f
--- /dev/null
+++ b/db/docs/api_java/log_cursor.html
@@ -0,0 +1,88 @@
+<!--Id: log_cursor.so,v 10.1 2001/09/28 15:09:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbEnv.log_cursor</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbEnv.log_cursor</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public DbLogc log_cursor(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbEnv.log_cursor method
+creates a log cursor.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbEnv.log_cursor method throws an exception that encapsulates a non-zero error value on
+failure.
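+<p>A minimal sketch (assuming the import shown in the synopsis and an
+already-open environment handle <tt>dbenv</tt>; exception handling is omitted):
+<p><blockquote><pre>DbLogc logc = dbenv.log_cursor(0);   // the flags argument must be 0
+// ... retrieve log records with DbLogc.logc_get ...
+logc.close(0);                       // discard the cursor when finished</pre></blockquote>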
+<h1>Errors</h1>
+<p>The DbEnv.log_cursor method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbEnv.log_cursor method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbEnv.log_cursor method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/db_class.html">Db</a>
+<h1>See Also</h1>
+<a href="../api_java/db_associate.html">Db.associate</a>,
+<a href="../api_java/db_close.html">Db.close</a>,
+<a href="../api_java/db_cursor.html">Db.cursor</a>,
+<a href="../api_java/db_del.html">Db.del</a>,
+<a href="../api_java/db_fd.html">Db.fd</a>,
+<a href="../api_java/db_get.html">Db.get</a>,
+<a href="../api_java/db_get.html">Db.pget</a>,
+<a href="../api_java/db_get_byteswapped.html">Db.get_byteswapped</a>,
+<a href="../api_java/db_get_type.html">Db.get_type</a>,
+<a href="../api_java/db_join.html">Db.join</a>,
+<a href="../api_java/db_key_range.html">Db.key_range</a>,
+<a href="../api_java/db_open.html">Db.open</a>,
+<a href="../api_java/db_put.html">Db.put</a>,
+<a href="../api_java/db_remove.html">Db.remove</a>,
+<a href="../api_java/db_rename.html">Db.rename</a>,
+<a href="../api_java/db_set_append_recno.html">Db.set_append_recno</a>,
+<a href="../api_java/db_set_bt_minkey.html">Db.set_bt_minkey</a>,
+<a href="../api_java/db_set_cachesize.html">Db.set_cachesize</a>,
+<a href="../api_java/db_set_errcall.html">Db.set_errcall</a>,
+<a href="../api_java/db_set_errpfx.html">Db.set_errpfx</a>,
+<a href="../api_java/db_set_feedback.html">Db.set_feedback</a>,
+<a href="../api_java/db_set_flags.html">Db.set_flags</a>,
+<a href="../api_java/db_set_h_ffactor.html">Db.set_h_ffactor</a>,
+<a href="../api_java/db_set_h_nelem.html">Db.set_h_nelem</a>,
+<a href="../api_java/db_set_lorder.html">Db.set_lorder</a>,
+<a href="../api_java/db_set_pagesize.html">Db.set_pagesize</a>,
+<a href="../api_java/db_set_q_extentsize.html">Db.set_q_extentsize</a>,
+<a href="../api_java/db_set_re_delim.html">Db.set_re_delim</a>,
+<a href="../api_java/db_set_re_len.html">Db.set_re_len</a>,
+<a href="../api_java/db_set_re_pad.html">Db.set_re_pad</a>,
+<a href="../api_java/db_set_re_source.html">Db.set_re_source</a>,
+<a href="../api_java/db_stat.html">Db.stat</a>,
+<a href="../api_java/db_sync.html">Db.sync</a>,
+<a href="../api_java/db_truncate.html">Db.truncate</a>,
+<a href="../api_java/db_upgrade.html">Db.upgrade</a>,
+and
+<a href="../api_java/db_verify.html">Db.verify</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/logc_class.html b/db/docs/api_java/logc_class.html
new file mode 100644
index 000000000..f079cb494
--- /dev/null
+++ b/db/docs/api_java/logc_class.html
@@ -0,0 +1,57 @@
+<!--Id: logc_class.so,v 1.1 2001/09/28 15:09:37 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public class DbLogc extends Object { ... }
+</pre></h3>
+<h1>Description</h1>
+<p>This manual page describes the specific details of the DbLogc class,
+which provides cursor support for log files.
+<p>The DbLogc functions are the library interface supporting
+sequential access to the records stored in log files. Cursors are
+created by calling the <a href="../api_java/log_cursor.html">DbEnv.log_cursor</a> method, which returns a DbLogc object.
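+<p>A sketch of a sequential scan of the log (assuming the import shown in the
+synopsis, an already-open environment handle <tt>dbenv</tt>, and the default
+DbLsn and Dbt constructors; exception handling is omitted and a return value
+of 0 is assumed to indicate success):
+<p><blockquote><pre>DbLogc logc = dbenv.log_cursor(0);
+DbLsn lsn = new DbLsn();
+Dbt data = new Dbt();
+data.set_flags(Db.DB_DBT_MALLOC);    // let the library allocate the record buffer
+while (logc.get(lsn, data, Db.DB_NEXT) == 0) {
+    // lsn and data now describe the next log record
+}
+logc.close(0);</pre></blockquote>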
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_cursor.html">DbEnv.log_cursor</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>,
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>,
+<a href="../api_java/logc_close.html">DbLogc.logc_close</a>
+and
+<a href="../api_java/logc_get.html">DbLogc.logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/logc_close.html b/db/docs/api_java/logc_close.html
new file mode 100644
index 000000000..eea8254e0
--- /dev/null
+++ b/db/docs/api_java/logc_close.html
@@ -0,0 +1,69 @@
+<!--Id: logc_close.so,v 10.3 2001/10/02 01:33:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc.logc_close</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc.logc_close</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public void close(int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc.logc_close method discards the log cursor. After DbLogc.logc_close
+has been called, regardless of its return, the cursor handle may not be
+used again.
+<p>The <b>flags</b> parameter is currently unused, and must be set to 0.
+<p>The DbLogc.logc_close method throws an exception that encapsulates a non-zero error value on
+failure.
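+<p>Because the cursor handle may not be used again after DbLogc.logc_close has
+been called, a common pattern (shown here only as a sketch, assuming the import
+in the synopsis, an already-open environment handle <tt>dbenv</tt>, and an
+enclosing method that declares <tt>throws DbException</tt>) is to close the
+cursor in a <tt>finally</tt> block:
+<p><blockquote><pre>DbLogc logc = dbenv.log_cursor(0);
+try {
+    // ... retrieve log records with DbLogc.logc_get ...
+} finally {
+    logc.close(0);   // the handle must not be used after this call
+}</pre></blockquote>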
+<h1>Errors</h1>
+<p>The DbLogc.logc_close method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The cursor was previously closed.
+</dl>
+<p>The DbLogc.logc_close method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc.logc_close method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_cursor.html">DbEnv.log_cursor</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>,
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>,
+<a href="../api_java/logc_close.html">DbLogc.logc_close</a>
+and
+<a href="../api_java/logc_get.html">DbLogc.logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/logc_get.html b/db/docs/api_java/logc_get.html
new file mode 100644
index 000000000..2269a8a4b
--- /dev/null
+++ b/db/docs/api_java/logc_get.html
@@ -0,0 +1,112 @@
+<!--Id: logc_get.so,v 10.30 2001/09/29 15:48:08 dda Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbLogc.logc_get</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbLogc.logc_get</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbLogc.logc_get method retrieves records from the log according to the
+<b>lsn</b> and <b>flags</b> arguments.
+<p>The data field of the <b>data</b> structure is set to the record
+retrieved, and the size field indicates the number of bytes in the
+record. See <a href="../api_java/dbt_class.html">Dbt</a> for a description of other fields in the
+<b>data</b> structure. The <a href="../api_java/dbt_class.html#DB_DBT_MALLOC">Db.DB_DBT_MALLOC</a>,
+<a href="../api_java/dbt_class.html#DB_DBT_REALLOC">Db.DB_DBT_REALLOC</a> and <a href="../api_java/dbt_class.html#DB_DBT_USERMEM">Db.DB_DBT_USERMEM</a> flags may be specified
+for any <a href="../api_java/dbt_class.html">Dbt</a> used for data retrieval.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_CHECKPOINT">Db.DB_CHECKPOINT</a><dd>The last record written with the DB_CHECKPOINT flag specified to the
+<a href="../api_java/log_put.html">DbEnv.log_put</a> method is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the record
+returned. If no record has been previously written with the DB_CHECKPOINT
+flag specified, the first record in the log is returned.
+<p>If the log is empty, the DbLogc.logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_FIRST">Db.DB_FIRST</a><dd>The first record from any of the log files found in the log directory
+is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbLogc.logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_LAST">Db.DB_LAST</a><dd>The last record in the log is returned in the <b>data</b> argument.
+The <b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the
+record returned.
+<p>If the log is empty, the DbLogc.logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p><dt><a name="Db.DB_NEXT">Db.DB_NEXT</a>, <a name="Db.DB_PREV">Db.DB_PREV</a><dd>The current log position is advanced to the next (previous) record in
+the log, and that record is returned in the <b>data</b> argument. The
+<b>lsn</b> argument is overwritten with the <a href="../api_java/lsn_class.html">DbLsn</a> of the record
+returned.
+<p>If the pointer has not been initialized via DB_FIRST, DB_LAST, DB_SET,
+DB_NEXT, or DB_PREV, DbLogc.logc_get will return the first (last) record
+in the log. If the last (first) log record has already been returned
+or the log is empty, the DbLogc.logc_get method will return <a href="../ref/program/errorret.html#DB_NOTFOUND">Db.DB_NOTFOUND</a>.
+<p>If the log was opened with the DB_THREAD flag set, calls to
+DbLogc.logc_get with the DB_NEXT (DB_PREV) flag set will return
+EINVAL.
+<p><dt><a name="Db.DB_CURRENT">Db.DB_CURRENT</a><dd>Return the log record to which the log currently refers.
+<p>If the log pointer has not been initialized via DB_FIRST, DB_LAST,
+DB_SET, DB_NEXT, or DB_PREV, or if the log was opened with the DB_THREAD
+flag set, DbLogc.logc_get will return EINVAL.
+<p><dt><a name="Db.DB_SET">Db.DB_SET</a><dd>Retrieve the record specified by the <b>lsn</b> argument. If the
+specified <a href="../api_java/lsn_class.html">DbLsn</a> is invalid (for example, it does not appear in
+the log) DbLogc.logc_get will return EINVAL.
+</dl>
+<p>Otherwise, the DbLogc.logc_get method throws an exception that encapsulates a non-zero error value on
+failure.
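+<p>As an illustration only (assuming the import shown in the synopsis, an
+already-open environment handle <tt>dbenv</tt>, the default DbLsn and Dbt
+constructors, and a return value of 0 for success; exception handling is
+omitted):
+<p><blockquote><pre>DbLogc logc = dbenv.log_cursor(0);
+DbLsn lsn = new DbLsn();
+Dbt data = new Dbt();
+data.set_flags(Db.DB_DBT_MALLOC);
+// Position on the last record in the log; lsn is overwritten with its LSN.
+if (logc.get(lsn, data, Db.DB_LAST) == 0) {
+    // The same record can later be re-read using the saved LSN.
+    logc.get(lsn, data, Db.DB_SET);
+}
+logc.close(0);</pre></blockquote>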
+<h1>Errors</h1>
+<p>The DbLogc.logc_get method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+<p>The DB_CURRENT flag was set and the log pointer had not yet been
+initialized.
+<p>The DB_SET flag was set and the specified log sequence number does not
+exist.
+</dl>
+<p>The DbLogc.logc_get method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbLogc.logc_get method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_lg_bsize.html">DbEnv.set_lg_bsize</a>,
+<a href="../api_java/env_set_lg_dir.html">DbEnv.set_lg_dir</a>,
+<a href="../api_java/env_set_lg_max.html">DbEnv.set_lg_max</a>,
+<a href="../api_java/env_set_lg_regionmax.html">DbEnv.set_lg_regionmax</a>,
+<a href="../api_java/log_archive.html">DbEnv.log_archive</a>,
+<a href="../api_java/log_compare.html">DbEnv.log_compare</a>,
+<a href="../api_java/log_cursor.html">DbEnv.log_cursor</a>,
+<a href="../api_java/log_file.html">DbEnv.log_file</a>,
+<a href="../api_java/log_flush.html">DbEnv.log_flush</a>,
+<a href="../api_java/log_put.html">DbEnv.log_put</a>,
+<a href="../api_java/log_register.html">DbEnv.log_register</a>,
+<a href="../api_java/log_stat.html">DbEnv.log_stat</a>,
+<a href="../api_java/log_unregister.html">DbEnv.log_unregister</a>,
+<a href="../api_java/logc_close.html">DbLogc.logc_close</a>
+and
+<a href="../api_java/logc_get.html">DbLogc.logc_get</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/api_java/txn_set_timeout.html b/db/docs/api_java/txn_set_timeout.html
new file mode 100644
index 000000000..9279d3b8f
--- /dev/null
+++ b/db/docs/api_java/txn_set_timeout.html
@@ -0,0 +1,82 @@
+<!--Id: txn_set_timeout.so,v 10.2 2001/09/07 21:43:17 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB: DbTxn.set_timeout</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td>
+<h1>DbTxn.set_timeout</h1>
+</td>
+<td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<hr size=1 noshade>
+<tt>
+<h3><pre>
+import com.sleepycat.db.*;
+<p>
+public int set_timeout(long timeout, int flags)
+ throws DbException;
+</pre></h3>
+<h1>Description</h1>
+<p>The DbTxn.set_timeout method sets timeout values for locks or
+transactions for the specified transaction. The timeout value is
+currently specified as an unsigned 32-bit number of microseconds,
+limiting the maximum timeout to roughly 71 minutes.
+<p>The <b>flags</b> value must be set to one of the following values:
+<p><dl compact>
+<p><dt><a name="Db.DB_SET_LOCK_TIMEOUT">Db.DB_SET_LOCK_TIMEOUT</a><dd>Set the timeout value for locks in this transaction.
+<p><dt><a name="Db.DB_SET_TXN_TIMEOUT">Db.DB_SET_TXN_TIMEOUT</a><dd>Set the timeout value for this transaction.
+</dl>
+<p>Timeouts are checked whenever a thread of control blocks on a lock or
+when deadlock detection is performed. (In the case of
+Db.DB_SET_LOCK_TIMEOUT, the lock is one requested explicitly
+through the Lock subsystem interfaces. In the case of
+Db.DB_SET_TXN_TIMEOUT, the lock is one requested on behalf of a
+transaction. In either case, it may be a lock requested by the database
+access methods underlying the application.) As timeouts are only
+checked when the lock request first blocks or when deadlock detection
+is performed, the accuracy of the timeout depends on how often deadlock
+detection is performed.
+<p>Timeout values may be specified for the database environment as a whole.
+See <a href="../api_java/env_set_timeout.html">DbEnv.set_timeout</a> for more information.
+<p>The DbTxn.set_timeout method throws an exception that encapsulates a non-zero error value on
+failure.
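+<p>As an illustration only (assuming the import shown in the synopsis, an
+already-open transactional environment handle <tt>dbenv</tt>, and the usual
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a> and
+<a href="../api_java/txn_commit.html">DbTxn.commit</a> calling conventions;
+exception handling is omitted):
+<p><blockquote><pre>DbTxn txn = dbenv.txn_begin(null, 0);
+// Give this transaction a 5-second (5,000,000 microsecond) timeout,
+// overriding any environment-wide value.
+txn.set_timeout(5000000L, Db.DB_SET_TXN_TIMEOUT);
+// ... transactionally protected operations ...
+txn.commit(0);</pre></blockquote>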
+<h1>Errors</h1>
+<p>The DbTxn.set_timeout method may fail and throw an exception encapsulating a non-zero error for the following conditions:
+<p><dl compact>
+<p><dt>EINVAL<dd>An invalid flag value or parameter was specified.
+</dl>
+<p>The DbTxn.set_timeout method may fail and throw an exception for errors specified for other Berkeley DB and C library or system methods.
+If a catastrophic error has occurred, the DbTxn.set_timeout method may fail and throw
+a <a href="../api_java/runrec_class.html">DbRunRecoveryException</a>, in which case all subsequent Berkeley DB calls
+will fail in the same way.
+<h1>Class</h1>
+<a href="../api_java/dbenv_class.html">DbEnv</a>, <a href="../api_java/txn_class.html">DbTxn</a>
+<h1>See Also</h1>
+<a href="../api_java/env_set_tx_max.html">DbEnv.set_tx_max</a>,
+<a href="../api_java/env_set_tx_timestamp.html">DbEnv.set_tx_timestamp</a>,
+<a href="../api_java/txn_abort.html">DbTxn.abort</a>,
+<a href="../api_java/txn_begin.html">DbEnv.txn_begin</a>,
+<a href="../api_java/txn_checkpoint.html">DbEnv.txn_checkpoint</a>,
+<a href="../api_java/txn_commit.html">DbTxn.commit</a>,
+<a href="../api_java/txn_discard.html">DbTxn.discard</a>,
+<a href="../api_java/txn_id.html">DbTxn.id</a>,
+<a href="../api_java/txn_prepare.html">DbTxn.prepare</a>,
+<a href="../api_java/txn_recover.html">DbEnv.txn_recover</a>,
+<a href="../api_java/txn_set_timeout.html">DbTxn.set_timeout</a>
+and
+<a href="../api_java/txn_stat.html">DbEnv.txn_stat</a>.
+</tt>
+<table width="100%"><tr><td><br></td><td align=right>
+<a href="../api_java/c_index.html"><img src="../images/api.gif" alt="API"></a><a href="../reftoc.html"><img src="../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/build_unix/macosx.html b/db/docs/ref/build_unix/macosx.html
new file mode 100644
index 000000000..783a41140
--- /dev/null
+++ b/db/docs/ref/build_unix/macosx.html
@@ -0,0 +1,28 @@
+<!--Id: macosx.so,v 11.1 2001/09/21 16:23:52 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Mac OS X</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for UNIX systems</dl></h3></td>
+<td align=right><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Mac OS X</h1>
+<p><ol>
+<p><li><b>I can't use the Berkeley DB Tcl and Java APIs on Mac OS X.</b>
+<p>The Berkeley DB configuration and build procedure is unable to dynamically
+load libraries on Mac OS X. This restriction is expected to be removed
+in future Berkeley DB releases.
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_unix/linux.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_unix/osf1.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/build_vxworks/introae.html b/db/docs/ref/build_vxworks/introae.html
new file mode 100644
index 000000000..4441aab5a
--- /dev/null
+++ b/db/docs/ref/build_vxworks/introae.html
@@ -0,0 +1,137 @@
+<!--Id: introae.so,v 1.3 2001/10/12 19:21:26 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building for VxWorks AE</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Building Berkeley DB for VxWorks systems</dl></h3></td>
+<td align=right><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building for VxWorks AE</h1>
+<p>The build_vxworks directory in the Berkeley DB distribution contains component
+files for Tornado 3.1. The Berkeley DB distribution also contains workspace
+and project files for Tornado 2.0. See
+<a href="../../ref/build_vxworks/intro.html">Building for VxWorks</a> for
+information about Tornado 2.0/VxWorks 5.4.
+The VxWorks AE component files are all within subdirectories, and
+all component files are named <i>component.wpj</i>.
+<p><table border=1 align=center>
+<tr><th>File</th><th>Description</th></tr>
+<tr> <td align=left>Berkeley DB/</td> <td align=left>Berkeley DB component directory</td> </tr>
+<tr> <td align=left>demo/demo</td> <td align=left>Demo program component directory</td> </tr>
+<tr> <td align=left>db_*/db_*</td> <td align=left><a href="../../utility/index.html">Support utilities</a> component directories</td> </tr>
+</table>
+<h3>Building With Tornado 3.1</h3>
+<p>This document assumes you already have a workspace set up and you
+want to add Berkeley DB as a component in that workspace. You may include
+this component in any domain deemed appropriate for your application.
+<p>To add the Berkeley DB component to your workspace, right-click on
+"Components" and choose <i>Add existing...</i>. Enter
+the pathname to your Berkeley DB distribution in the form
+<i>&lt;pathname to your Berkeley DB distribution&gt;</i>
+<b>/build_vxworks/Berkeley DB/</b>.
+You will see <b>component.wpj</b> listed under "Files".
+Choose that filename and click "Ok". You will now have
+a Berkeley DB component in your workspace.
+<p>There are essentially three options regarding protection
+domains and the Berkeley DB component. The first option is to add
+the Berkeley DB component directly into your application domain. You may
+choose to do this by downloading Berkeley DB into that domain on your
+target, or by adding the component to the domain itself and it will
+be built when the application domain is built. The disadvantage
+of this option is that no other application domain will have access to
+the Berkeley DB interfaces.
+<p>The second option is to add the Berkeley DB component directly into your
+<i>vxKernel</i> domain. The advantage is that any application
+using the Berkeley DB interface would have access to them and no changes
+would be necessary to the linkage path. The disadvantage is that
+all Berkeley DB code would run with system privileges.
+<p>The third option is to add a Berkeley DB shared library domain to your
+system. Then add or download the Berkeley DB component to that shared
+library domain. The advantage is that all application domains
+using the Berkeley DB interfaces can access a single copy of the library
+running in user mode. The disadvantages are that one must
+remember to add the Berkeley DB shared library domain to the linkage
+path of every application domain using Berkeley DB and that shared library
+domains may not link against one another, a consideration if the
+application using Berkeley DB is itself a shared library.
+<p>We believe the options outlined above are the most common ways in
+which the Berkeley DB component will be used, and that the third
+option, creating a shared library domain, is generally the most useful.
+Ultimately, the responsibility for choosing the correct mechanism
+for including the Berkeley DB component in the appropriate domain falls
+to the application developer.
+<p>The remainder of this document assumes that you already have a
+VxWorks AE target and a target server, both up and running; that your
+VxWorks AE image is configured properly for your needs; and that you
+have an acceptable file system already available. See the
+<a href="../../ref/build_vxworks/faq.html">VxWorks FAQ</a> for more
+information about file system requirements.
+<p>To build Berkeley DB, first, you need to set the build selection. To do
+this, right-click on the Berkeley DB component name and choose the
+<i>Build settings...</i> selection. If you look at the
+<i>Active Build Specification</i> drop down list, you will see
+several different builds, containing different configurations.
+<p><table border=1 align=center>
+<tr><th>Build</th><th>Description</th></tr>
+<tr> <td align=left>PENTIUM2gnu.debug</td> <td align=left>PII BSP with debugging</td> </tr>
+<tr> <td align=left>PENTIUM2gnu.release</td> <td align=left>PII BSP no debugging</td> </tr>
+</table>
+<p>You have to add a new build specification if you use a
+different BSP, want to add a build for the simulator, or
+want to customize further. For instance, if you have the Power PC (PPC)
+BSP, you need to add a new build for the PPC tool chain. To do so,
+select the "Add..." button in the <i>Build Settings</i> window.
+A new window will appear giving you a list of all the BSPs
+you have available from which to choose.
+For your new build target, you need to decide whether it should be built for
+debugging. See the <i>C/C++ compiler</i> tab of the Pentium
+builds for ways to
+configure for each case. After you add this build, you still need
+to configure the include directories correctly, as described in the
+sections that follow.
+<p>If you are running with a different
+BSP, you should remove the build specifications that do not apply to
+your hardware. We recommend doing this only after you have configured
+any new build specifications.
+<p>If you are adding a new build, you must set the include directories
+correctly. After you have added the new build in the "Build Settings"
+window, click on the <i>C/C++ compiler</i> tab.
+In the edit box, you need to add the pathname of the
+<i>build_vxworks</i> subdirectory of Berkeley DB, followed by the
+pathname of the <i>include</i> and <i>include_auto</i>
+subdirectories of Berkeley DB. You should add these directories
+relative to the project directory, using the <b>PRJ_DIR</b>
+macro, which is the Berkeley DB subdirectory of <i>build_vxworks</i>.
+Then, click OK. The typical addition of include
+directories will look like:
+<p><blockquote><pre>-I$(PRJ_DIR)/.. -I$(PRJ_DIR)/../../include -I$(PRJ_DIR)/../../include_auto</pre></blockquote>
+<p>If the architecture for this new build has the most significant byte
+first, you also need to edit the <i>db_config.h</i> file in the
+build directory and define <b>WORDS_BIGENDIAN</b>.
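+<p>For example (the exact placement within <i>db_config.h</i> may
+vary), the added definition would typically look like:
+<p><blockquote><pre>#define WORDS_BIGENDIAN 1</pre></blockquote>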
+<p>To build and download the Berkeley DB downloadable application for the first time
+requires several steps:
+<p><ol>
+<p><li>Select the build you are interested in using the <i>Build Settings</i>
+window. Click OK when done.
+<p><li>Select the Berkeley DB component and right-click. Choose the
+<i>Clean Build</i> selection.
+<p><li>Select the Berkeley DB component and right-click. Choose the
+<i>Download...</i> selection.
+</ol>
+<p>You need to repeat this procedure for all builds you are interested in
+building, as well as for all of the utility project builds you want to
+run.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/build_vxworks/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/build_vxworks/notes.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/lock/timeout.html b/db/docs/ref/lock/timeout.html
new file mode 100644
index 000000000..063f9fce2
--- /dev/null
+++ b/db/docs/ref/lock/timeout.html
@@ -0,0 +1,58 @@
+<!--Id: timeout.so,v 1.1 2001/08/18 17:05:29 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Deadlock detection using timers</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a><a name="3"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Locking Subsystem</dl></h3></td>
+<td align=right><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Deadlock detection using timers</h1>
+<p>Lock and transaction timeouts may be used in place of, or in addition
+to, regular deadlock detection. If lock timeouts are set, lock requests
+will return <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a> from a lock call when it is
+detected that the lock's timeout has expired; that is, the lock request
+has blocked waiting longer than the specified timeout. If transaction
+timeouts are set, lock requests will return <a href="../../ref/program/errorret.html#DB_LOCK_DEADLOCK">DB_LOCK_DEADLOCK</a>
+from a lock call when it has been detected that the transaction has been
+active longer than the specified timeout.
+<p>As lock and transaction timeouts are only checked when lock requests
+first block or when deadlock detection is performed, the accuracy of
+the timeout depends on how often deadlock detection is performed. More
+specifically, transactions will continue to run after their timeout has
+expired if they do not block on a lock request after that time.
+<p>If the database environment deadlock detector has been configured with
+the <a href="../../api_c/env_set_lk_detect.html#DB_LOCK_EXPIRE">DB_LOCK_EXPIRE</a> option, timeouts are the only mechanism by
+which deadlocks will be broken. If the deadlock detector has been
+configured with a different option, then regular deadlock detection will
+be performed, and in addition, if timeouts have also been specified,
+lock requests and transactions will time out as well.
+<p>Lock and transaction timeouts may be specified on a database environment
+wide basis using the <a href="../../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a> function. Lock timeouts may be
+specified on a per-lock request basis using the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> function.
+Transaction timeouts may be specified on a per-transaction basis using
+the <a href="../../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a> function. Per-lock and per-transaction timeouts
+supersede environment wide timeouts.
+<p>For example, consider that the environment wide transaction timeout has
+been set to 20ms, the environment wide lock timeout has been set to
+10ms, a transaction has been created in this environment and its timeout
+value set to 8ms, and a specific lock request has been made on behalf
+of this transaction where the lock timeout was set to 4ms. By default,
+transactions in this environment will be timed out if they block waiting
+for a lock after 20ms. The specific transaction described will be timed
+out if it blocks waiting for a lock after 8ms. By default, any lock
+request in this system will be timed out if it blocks longer than 10ms,
+and the specific lock described will be timed out if it blocks longer
+than 4ms.
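+<p>The following C fragment is a minimal sketch (it is not part of the
+Berkeley DB distribution) showing how the timeouts in this example might be
+configured, assuming <b>dbenv</b> is an open, transactional environment
+and that timeout values are expressed in microseconds; see the
+<a href="../../api_c/env_set_timeout.html">DB_ENV-&gt;set_timeout</a> and <a href="../../api_c/txn_set_timeout.html">DB_TXN-&gt;set_timeout</a> pages for the authoritative
+flag names and units:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+int
+configure_timeouts(DB_ENV *dbenv, DB_TXN **txnp)
+{
+    DB_TXN *txn;
+    int ret;
+
+    /* Environment-wide defaults: 20ms transaction, 10ms lock timeout. */
+    if ((ret = dbenv->set_timeout(dbenv, 20000, DB_SET_TXN_TIMEOUT)) != 0)
+        return (ret);
+    if ((ret = dbenv->set_timeout(dbenv, 10000, DB_SET_LOCK_TIMEOUT)) != 0)
+        return (ret);
+
+    /* A specific transaction with its own 8ms timeout. */
+    if ((ret = dbenv->txn_begin(dbenv, NULL, &amp;txn, 0)) != 0)
+        return (ret);
+    if ((ret = txn->set_timeout(txn, 8000, DB_SET_TXN_TIMEOUT)) != 0) {
+        (void)txn->abort(txn);
+        return (ret);
+    }
+    *txnp = txn;
+    return (0);
+}
+</pre></blockquote>
+<p>The per-lock timeout from the example would be supplied with the
+individual lock request using the <a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a> function, which is omitted
+here for brevity.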
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/lock/dead.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/lock/page.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/app.html b/db/docs/ref/rep/app.html
new file mode 100644
index 000000000..cc4ca9bdf
--- /dev/null
+++ b/db/docs/ref/rep/app.html
@@ -0,0 +1,73 @@
+<!--Id: app.so,v 1.1 2001/10/13 19:56:23 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building replicated applications</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/pri.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building replicated applications</h1>
+<p>The simplest way to build a replicated Berkeley DB application is to first
+build (and debug!) the transactional version of the same application.
+Then, add a thin replication layer to the application. Highly available
+applications use the following four additional Berkeley DB methods:
+<a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a>, <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>, <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> and
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a>:
+<p><dl compact>
+<p><dt><a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a><dd>The <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> function configures (or reconfigures) an existing database
+environment to be a replication master or client.
+<p><dt><a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a><dd>The <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> function causes the replication group to elect a new
+master; it is called whenever contact with the master is lost.
+<p><dt><a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a><dd>The <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function is used to process incoming messages from other
+environments in the replication group. For clients, it is responsible
+for accepting log records and updating the local databases based on
+messages from the master. For both the master and the clients, it is
+responsible for handling administrative functions (for example, the
+protocol for dealing with lost messages), and permitting new clients to
+join an active replication group.
+<p><dt><a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a><dd>The <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> function configures the replication system's
+communications infrastructure.
+</dl>
+<p>To add replication to a Berkeley DB application, application initialization
+must be changed, and some new code, the application's communications
+infrastructure, must be written. The application initialization changes
+are relatively simple, but the communications infrastructure code is
+fairly complex.
+<p>During application initialization, the application must perform two
+additional tasks: first, it must provide Berkeley DB information about its
+communications infrastructure, and second, it must start the Berkeley DB
+replication system. Generally, a replicated application will do normal
+Berkeley DB recovery and configuration, exactly like any other transactional
+application. Then, once the database environment has been opened, it
+will call the <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> function to configure Berkeley DB for replication,
+and then will call the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> function to join or create the
+replication group. When calling <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, the application has
+two choices: specifically configure the master for the replication
+group, or, alternatively, configure all group members as clients and
+then call an election, letting the clients select the master from among
+themselves. Either is correct, and the choice is entirely up to the
+application.
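+<p>The following C sketch illustrates the initialization order described
+above. It is not taken from the Berkeley DB distribution: the
+<b>app_send</b> callback, the <b>SELF_EID</b> constant and the exact
+callback prototype are assumptions made for illustration; see the
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> and <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> pages for the authoritative
+interfaces:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+#define SELF_EID    1    /* Hypothetical local environment ID. */
+
+/*
+ * Hypothetical communications callback; a real version transmits the
+ * control and rec DBTs to the environment identified by eid.
+ */
+static int
+app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+    /* ... send the message over the application's channels ... */
+    return (0);
+}
+
+/*
+ * Called after normal recovery and environment open: register the
+ * communications callback, then join (or create) the replication group.
+ */
+int
+start_replication(DB_ENV *dbenv, int is_master)
+{
+    int ret;
+
+    if ((ret = dbenv->set_rep_transport(dbenv, SELF_EID, app_send)) != 0)
+        return (ret);
+    return (dbenv->rep_start(dbenv,
+        NULL, is_master ? DB_REP_MASTER : DB_REP_CLIENT));
+}
+</pre></blockquote>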
+<p>Databases are generally opened read-write on both clients and masters
+in order to simplify upgrading replication clients to be masters. (If
+databases are opened read-only on clients, and the client is then
+upgraded to be the master, the client would have to close and reopen
+all of its databases in order to support database update queries.)
+However, even though the database is opened read-write on the client,
+any attempt to update it will result in an error until the client is
+reconfigured as a master.
+<p>There are no additional interface calls required to shut down a database
+environment participating in a replication group. The application
+should shut down the environment in the usual manner, by calling the
+<a href="../../api_c/env_close.html">DB_ENV-&gt;close</a> function.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/pri.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/comm.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/comm.html b/db/docs/ref/rep/comm.html
new file mode 100644
index 000000000..a58a11e0b
--- /dev/null
+++ b/db/docs/ref/rep/comm.html
@@ -0,0 +1,71 @@
+<!--Id: comm.so,v 1.1 2001/10/13 19:56:23 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Building the communications infrastructure</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/newsite.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Building the communications infrastructure</h1>
+<p>Replicated applications are typically written with one or more threads
+of control looping on one or more communication channels. These threads
+accept messages from remote environments for the local database
+environment, and accept messages from the local environment for remote
+environments. Messages from remote environments are passed to the local
+database environment using the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function. Messages from the
+local environment are passed to the application for transmission using
+the callback interface specified to the <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> function.
+<p>Both clients and servers establish communication channels by calling
+the <a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> function. This method specifies the <b>send</b>
+interface, a callback interface used by Berkeley DB for sending messages to
+other database environments in the replication group. The <b>send</b>
+interface takes an environment ID and two opaque data objects. It is
+the responsibility of the <b>send</b> interface to transmit the
+information in the two data objects to the database environment
+corresponding to the ID, with the receiving application then calling
+the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function to process the message.
+<p>The details of the transport mechanism are left entirely to the
+application; the only requirement is that the data buffer and size of
+each of the control and rec <a href="../../api_c/dbt.html">DBT</a>s passed to the <b>send</b>
+function on the sending site be faithfully copied and delivered to the
+receiving site by means of a call to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> with
+corresponding arguments. The <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function is free-threaded; it
+is safe to deliver any number of messages simultaneously, and from any
+arbitrary thread or process in the Berkeley DB environment.
+<p>There are a number of informational returns from the
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function:
+<p><dl compact>
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a>, it means
+that another database environment in the replication group has called
+for an election. The application should call the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> function.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a>, it means that
+a new master has been elected. The call will also return the local
+environment's ID for that master. If the ID of the master has changed,
+the application may need to reconfigure itself (for example, to redirect
+update queries to the new master rather than the old one). If the new
+master is the local environment, then the application must call the
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> function to reconfigure the supporting Berkeley DB library as
+a replication master.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>, it means that
+a message from a previously unknown member of the replication group has
+been received. The application should reconfigure itself as necessary
+so it is able to send messages to this site.
+<p><dt><a href="../../api_c/rep_message.html#DB_REP_OUTDATED">DB_REP_OUTDATED</a><dd>When <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_OUTDATED">DB_REP_OUTDATED</a>, it means that
+the environment has been out-of-contact with the master for too long a
+time, and the master no longer has sufficient logs in order to bring the
+local client up-to-date. The application should shut down and
+reinitialize the client (see <a href="../../ref/rep/init.html">Initializing
+a new site</a> for more information).
+</dl>
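+<p>A sketch of handling these returns in the application's message loop
+follows. The helper routines and the in/out use of <b>eid</b> are
+hypothetical; see the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> page for the authoritative
+interface:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Hypothetical helpers supplied by the application. */
+void call_election(DB_ENV *);
+void handle_new_master(DB_ENV *, int);
+void add_site(DB_ENV *, int, DBT *);
+void reinitialize_client(DB_ENV *);
+
+/* Process one incoming message whose sender is identified by eid. */
+void
+process_incoming(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
+{
+    switch (dbenv->rep_process_message(dbenv, control, rec, &amp;eid)) {
+    case 0:
+        break;
+    case DB_REP_HOLDELECTION:
+        /* Another site called for an election: participate in it. */
+        call_election(dbenv);
+        break;
+    case DB_REP_NEWMASTER:
+        /* eid now identifies the new master; reconfigure if needed. */
+        handle_new_master(dbenv, eid);
+        break;
+    case DB_REP_NEWSITE:
+        /* A previously unknown site: record how to contact it. */
+        add_site(dbenv, eid, rec);
+        break;
+    case DB_REP_OUTDATED:
+        /* Too far behind the master: shut down and reinitialize. */
+        reinitialize_client(dbenv);
+        break;
+    default:
+        /* Treat other non-zero returns as errors. */
+        break;
+    }
+}
+</pre></blockquote>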
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/app.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/newsite.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/elect.html b/db/docs/ref/rep/elect.html
new file mode 100644
index 000000000..8aa9bd6a6
--- /dev/null
+++ b/db/docs/ref/rep/elect.html
@@ -0,0 +1,71 @@
+<!--Id: elect.so,v 1.1 2001/10/13 19:56:23 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Elections</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/init.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/logonly.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Elections</h1>
+<p>Berkeley DB never initiates elections; that is the responsibility of the
+application. It is not dangerous to hold an election, as the Berkeley DB
+election process ensures there is never more than a single master
+environment. Clients should initiate an election whenever they lose
+contact with the master environment, whenever they see a return of
+<a href="../../api_c/rep_message.html#DB_REP_HOLDELECTION">DB_REP_HOLDELECTION</a> from the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function, or when, for
+whatever reason, they do not know who the master is.
+<p>For a client to become the master, the client must win an election. To
+win an election, the replication group must currently have no master
+and the client must have the highest priority of the database
+environments participating in the election. It is dangerous to
+configure more than one master environment using the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> function,
+and applications should be careful not to do so. Applications should
+only configure themselves as the master environment if they are the only
+possible master, or if they have won an election. An application can
+only know it has won an election if the <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> function returns
+success and the local database environment's ID as the new master
+environment ID, or if the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function returns
+<a href="../../api_c/rep_message.html#DB_REP_NEWMASTER">DB_REP_NEWMASTER</a> and the local database environment's ID as the
+new master environment ID.
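+<p>A sketch of calling an election and acting on its outcome follows.
+The parameter list shown for <a href="../../api_c/rep_elect.html">DB_ENV-&gt;rep_elect</a> (number of sites, local
+priority, timeout, returned winner ID) and the <b>SELF_EID</b> constant
+are assumptions made for illustration; see the manual page for the
+authoritative interface:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+#define SELF_EID    1    /* Hypothetical local environment ID. */
+
+int
+hold_election(DB_ENV *dbenv, int nsites, int priority, u_int32_t timeout)
+{
+    int newmaster, ret;
+
+    if ((ret = dbenv->rep_elect(dbenv,
+        nsites, priority, timeout, &amp;newmaster)) != 0)
+        return (ret);
+
+    /* We only know we won if the returned ID is our own. */
+    if (newmaster == SELF_EID)
+        ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+
+    return (ret);
+}
+</pre></blockquote>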
+<p>To add a database environment to the replication group with the intent
+of it becoming the master, first add it as a client. Since it may be
+out-of-date with respect to the current master, allow it to update
+itself from the current master. Then, shut the current master down.
+Presumably, the added client will win the subsequent election. If the
+client does not win the election, it is likely that it was not given
+sufficient time to update itself with respect to the current master.
+<p>If a client is unable to find a master or win an election, it means that
+the network has been partitioned and there are not enough environments
+participating in the election for one of the participants to win. In
+this case, the application should periodically hold an election until
+a master is found or an election is won. In desperate circumstances,
+an application could simply declare itself the master by calling
+<a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a>, or by reducing the number of participants required
+to win an election until the election is won. Neither of these
+solutions is recommended: in the case of a network partition, either of
+these choices can result in there being two masters in one replication
+group, and the databases in the environment might irretrievably diverge
+as they are modified in different ways by the masters.
+<p>Finally, it is possible for the wrong database environment to win an
+election if a number of systems crash at the same time. Because an
+election winner is declared as soon as enough environments participate
+in the election, the environment on a slow booting but well-connected
+machine might lose to an environment on a badly connected but faster
+booting machine. In the case of a number of environments crashing at
+the same time (for example, a set of replicated servers in a single
+machine room), applications should bring the database environments on
+line as clients initially (which will allow them to process read queries
+immediately), and then hold an election after sufficient time has passed
+for the slower booting machines to catch up.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/init.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/logonly.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/faq.html b/db/docs/ref/rep/faq.html
new file mode 100644
index 000000000..3670df8e9
--- /dev/null
+++ b/db/docs/ref/rep/faq.html
@@ -0,0 +1,77 @@
+<!--Id: faq.so,v 1.1 2001/10/13 19:56:23 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication FAQ</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/trans.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication FAQ</h1>
+<p><ol>
+<p><li><b>Does Berkeley DB provide support for sending database update messages
+from the client application to the master?</b>
+<p>No, it does not. The Berkeley DB RPC server code could be modified to support
+this functionality, but in general this protocol is left entirely to
+the application. Note, there is no reason not to use the communications
+channels the application establishes for replication support to forward
+database update messages to the master, Berkeley DB does not require that
+those channels be used exclusively for replication messages.
+<p><li><b>Can I use replication to partition my environment across
+multiple sites?</b>
+<p>No, this is not possible. All replicated databases must be equally
+shared by all environments in the replication group.
+<p><li><b>What happens if an environment with out-of-date information
+wins the election?</b>
+<p>This is unlikely, but it is possible, and the outcome can be the loss
+of information. For example, consider a system with a master
+environment and two clients A and B, where client A may upgrade to
+master status and client B cannot. Then, assume client A is partitioned
+from the other two database environments, and it becomes out-of-date
+with respect to the master. Then, assume the master crashes and does
+not come back on-line. Subsequently, the network partition is restored,
+and clients A and B hold an election. As client B cannot win the
+election, client A will win by default, and in order to get back into
+sync with the new master, client B will have to unroll possibly
+committed transactions until the two environments can once again move
+forward together.
+<p>This scenario stresses the importance of good network infrastructure in
+your replicated environments. When replicating database environments
+over sufficiently lossy networking, the best solution is usually to pick
+a single master, and only hold elections when human intervention has
+determined the selected master is unable to recover.
+<p><li><b>How can I distinguish Berkeley DB messages from application messages?</b>
+<p>There is no way to distinguish Berkeley DB messages from application-specific
+messages, nor does Berkeley DB offer any way to wrap application messages
+inside of Berkeley DB messages. Distributed applications exchanging their
+own messages should either enclose Berkeley DB messages in their own wrappers,
+or use separate network connections to send and receive Berkeley DB messages.
+The one exception to this rule is connection information for new sites;
+Berkeley DB offers a simple method for sites joining replication groups to
+send connection information to the other database environments in the
+group (see <a href="../../ref/rep/newsite.html">Connecting to a new site</a>
+for more information).
+<p><li><b>How should I build my <b>send</b> function?</b>
+<p>This depends on the specifics of the application. One common way is
+to write the <b>rec</b> and <b>control</b> arguments' sizes and data
+to a socket connected to each remote site; a minimal sketch of this
+approach follows this list.
+<p><li><b>Can I use replication to replicate just the database
+environment's log files?</b>
+<p>Not explicitly. However, a client replica will contain a full set of
+logs as generated by the master, within the semantic limits of the
+transport mechanism. In the event that a master crashes, the client
+environment may be used directly (after running recovery) or for
+catastrophic recovery on the master site.
+<p><font color=red>There's a DB_REP_LOGSONLY flag -- so this is wrong, I
+think.</font>
+</ol>
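+<p>The sketch referenced in the <b>send</b> function answer above
+follows: it writes one message to a connected socket as a pair of
+length-prefixed buffers. Error handling, partial writes and byte-order
+conversion of the lengths are omitted, and the matching reader on the
+remote site is assumed to reconstruct the two <a href="../../api_c/dbt.html">DBT</a>s and pass them to
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>:
+<p><blockquote><pre>
+#include &lt;sys/types.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;db.h&gt;
+
+int
+send_on_socket(int fd, const DBT *control, const DBT *rec)
+{
+    u_int32_t csize, rsize;
+
+    csize = control == NULL ? 0 : control->size;
+    rsize = rec == NULL ? 0 : rec->size;
+
+    /* Write each DBT as a 32-bit length followed by that many bytes. */
+    if (write(fd, &amp;csize, sizeof(csize)) != sizeof(csize) ||
+        (csize != 0 &amp;&amp; write(fd, control->data, csize) != (ssize_t)csize) ||
+        write(fd, &amp;rsize, sizeof(rsize)) != sizeof(rsize) ||
+        (rsize != 0 &amp;&amp; write(fd, rec->data, rsize) != (ssize_t)rsize))
+        return (-1);
+    return (0);
+}
+</pre></blockquote>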
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/trans.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/xa/intro.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/id.html b/db/docs/ref/rep/id.html
new file mode 100644
index 000000000..d470db350
--- /dev/null
+++ b/db/docs/ref/rep/id.html
@@ -0,0 +1,37 @@
+<!--Id: id.so,v 1.2 2001/10/13 19:56:23 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication environment IDs</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/pri.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication environment IDs</h1>
+<p>Each database environment included in a replication group must have a
+unique identifier for itself and for the other members of the
+replication group. The identifiers do not need to be global; that is,
+each instance of a running application can assign local identifiers to
+members of the replication group as they find out about them. For
+example, given three sites: A, B and C, site A might assign the
+identifiers 1 and 2 to sites B and C respectively, while site B might
+assign the identifiers 301 and 302 to sites A and C respectively. Note
+that it is not wrong to have global identifiers, of course; it is just
+not necessary.
+<p>It is the responsibility of the application to label each incoming
+replication message passed to the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function with the appropriate
+identifier. Subsequently, Berkeley DB will label outgoing messages to the
+<b>send</b> interface with those same identifiers.
+<p>Negative identifiers are reserved for use by Berkeley DB, and should never be
+assigned to environments by the application.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/pri.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/init.html b/db/docs/ref/rep/init.html
new file mode 100644
index 000000000..0ab90224d
--- /dev/null
+++ b/db/docs/ref/rep/init.html
@@ -0,0 +1,44 @@
+<!--Id: init.so,v 1.1 2001/10/13 19:56:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Initializing a new site</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/newsite.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/elect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Initializing a new site</h1>
+<p>Perform the following steps to add a new site to the replication
+group:
+<p><ol>
+<p><li>Perform a hot backup of the master's environment, as described in
+<a href="../../ref/transapp/archival.html">Database and log file archival</a>.
+<p><li>Copy the hot backup to the client.
+<p><li>Run ordinary (non-catastrophic) recovery on the client's new
+environment, using either the <a href="../../utility/db_recover.html">db_recover</a> utility or the
+<a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag to <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a>.
+<p><li>Reconfigure and reopen the environment as a client member of the
+replication group.
+</ol>
+<p>If copying the hot backup to the client takes a long time relative to
+the frequency with which log files are reclaimed using the
+<a href="../../utility/db_archive.html">db_archive</a> utility or the <a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a> function, it may be
+necessary to suppress log reclamation until the newly restarted client
+has "caught up" and applied all log records generated during its
+downtime.
+<p>As with any Berkeley DB application, the database environment must be in a
+consistent state at application startup. This is most easily assured
+by running recovery at startup time in one thread or process; it is
+harmless to do this on both clients and masters even when not strictly
+necessary.
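+<p>A typical way to do this is to specify the <a href="../../api_c/env_open.html#DB_RECOVER">DB_RECOVER</a> flag when opening
+the transactional environment at startup, as in the following sketch
+(the flag set shown is a common transactional configuration, not a
+requirement of replication):
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+int
+open_env(DB_ENV **dbenvp, const char *home)
+{
+    DB_ENV *dbenv;
+    int ret;
+
+    if ((ret = db_env_create(&amp;dbenv, 0)) != 0)
+        return (ret);
+
+    /* Run ordinary recovery as part of opening the environment. */
+    if ((ret = dbenv->open(dbenv, home,
+        DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+        DB_INIT_MPOOL | DB_INIT_TXN | DB_RECOVER, 0)) != 0) {
+        (void)dbenv->close(dbenv, 0);
+        return (ret);
+    }
+    *dbenvp = dbenv;
+    return (0);
+}
+</pre></blockquote>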
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/newsite.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/elect.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/intro.html b/db/docs/ref/rep/intro.html
new file mode 100644
index 000000000..e0d670f98
--- /dev/null
+++ b/db/docs/ref/rep/intro.html
@@ -0,0 +1,57 @@
+<!--Id: intro.so,v 1.2 2001/10/13 20:21:47 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/transapp/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/id.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Introduction</h1>
+<p>Berkeley DB includes support for building highly available applications based
+on single-master (read-only) replication. Berkeley DB replication groups
+consist of some number of independently configured database
+environments. The database environments might be on separate computers,
+on separate boards in a computer with replicated hardware, or on
+separate disks in a single computer. The environments may be accessed
+by any number of applications, from one to many. As always with Berkeley DB
+environments, any number of concurrent processes or threads may access
+the database environment.
+<p>Berkeley DB replication groups contain a single <i>master</i> database
+environment and one or more <i>client</i> database environments.
+Master environments support both database reads and writes; client
+environments support only database reads. If the master environment
+fails, applications may upgrade a client to be the new master.
+<p>Applications may be written to provide various degrees of consistency
+between the master and clients. The system can be run synchronously
+such that replicas are guaranteed to be up-to-date with all committed
+transactions, but doing so may incur a significant performance penalty.
+Higher performance solutions sacrifice total consistency, allowing the
+clients to be out of date for an application-controlled amount of time.
+<p>While Berkeley DB includes the tools necessary to construct highly available
+database environments, applications must provide several critical
+components. First, the application is responsible for providing the
+communication infrastructure. Applications may use whatever wire
+protocol is appropriate (for example, RPC, TCP/IP, UDP, VI, or
+message-passing over the backplane). Second, the
+application is responsible for naming. Berkeley DB refers to the members of
+a replication group using an application-provided ID, and applications
+must map that ID to a particular database environment or communication
+channel. Third, the application is responsible for monitoring the status
+of the master and clients, and identifying any unavailable database
+environments. Finally, the application must provide whatever security
+policies are needed. For example, the application may choose to encrypt
+data, use a secure socket layer, or do nothing at all. The level of
+security is left to the sole discretion of the application.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/faq.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/id.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/logonly.html b/db/docs/ref/rep/logonly.html
new file mode 100644
index 000000000..cb1fac5b4
--- /dev/null
+++ b/db/docs/ref/rep/logonly.html
@@ -0,0 +1,22 @@
+<!--Id: logonly.so,v 1.1 2001/10/13 19:56:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Log file only clients</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/elect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/trans.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Log file only clients</h1>
+<p><font color=red>Needs to be written.</font>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/elect.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/trans.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/newsite.html b/db/docs/ref/rep/newsite.html
new file mode 100644
index 000000000..3856bbe7e
--- /dev/null
+++ b/db/docs/ref/rep/newsite.html
@@ -0,0 +1,43 @@
+<!--Id: newsite.so,v 1.1 2001/10/13 19:56:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Connecting to a new site</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/init.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Connecting to a new site</h1>
+<p>Connecting to a new site in the replication group happens whenever the
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>. The application
+should assign the new site a local environment ID number, and all future
+messages from the site passed to <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> should include that
+environment ID number. It is possible, of course, for the application
+to be aware of a new site before the return of <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> (for
+example, applications using connection-oriented protocols are likely
+to detect new sites immediately, while applications using broadcast
+protocols may not).
+<p>Regardless, in applications supporting the dynamic addition of database
+environments to replication groups, environments joining an existing
+replication group may need to provide contact information. (For
+example, in an application using TCP/IP sockets, a DNS name or IP
+address might be a reasonable value to provide.) This can be done using
+the <b>cdata</b> parameter to the <a href="../../api_c/rep_start.html">DB_ENV-&gt;rep_start</a> function. The information
+referenced by <b>cdata</b> is wrapped in the initial contact message
+sent by the new environment, and is provided to the existing members of
+the group using the <b>rec</b> parameter returned by <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a>.
+If no additional information was provided for Berkeley DB to forward to the
+existing members of the group, the <b>data</b> field of the <b>rec</b>
+parameter passed to the <a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> function will be NULL after
+<a href="../../api_c/rep_message.html">DB_ENV-&gt;rep_process_message</a> returns <a href="../../api_c/rep_message.html#DB_REP_NEWSITE">DB_REP_NEWSITE</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/comm.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/init.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/pri.html b/db/docs/ref/rep/pri.html
new file mode 100644
index 000000000..6ceaf09be
--- /dev/null
+++ b/db/docs/ref/rep/pri.html
@@ -0,0 +1,32 @@
+<!--Id: pri.so,v 1.1 2001/10/13 19:56:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Replication environment priorities</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/id.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Replication environment priorities</h1>
+<p>Each database environment included in a replication group must have a
+priority, which specifies a relative ordering among the different
+environments in a replication group. This ordering determines which
+environment will be selected as a new master in case the existing master
+fails.
+<p>Priorities may be any integer. Larger valued priorities indicate a more
+desirable master. For example, if a replication group consists of three
+database environments, two of which are connected by an OC3 and the
+third of which is connected by a T1, the third database environment
+should be assigned a priority value which is lower than either of the
+other two.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/id.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/app.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/rep/trans.html b/db/docs/ref/rep/trans.html
new file mode 100644
index 000000000..db8bd7b13
--- /dev/null
+++ b/db/docs/ref/rep/trans.html
@@ -0,0 +1,101 @@
+<!--Id: trans.so,v 1.1 2001/10/13 19:56:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Transactional guarantees</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Replication</dl></h3></td>
+<td align=right><a href="../../ref/rep/logonly.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Transactional guarantees</h1>
+<p>It is important to consider replication in the context of the overall
+database environment's transactional guarantees. To briefly review,
+transactional guarantees in a non-replicated application are based on
+the writing of log file records to "stable storage", usually a disk
+drive. If the application or system then fails, the Berkeley DB logging
+information is reviewed during recovery, and the databases are updated
+so that all changes made as part of committed transactions appear, and
+all changes made as part of uncommitted transactions do not appear. In
+this case, no information will have been lost.
+<p>If the database environment has been configured to not synchronously
+flush the log to stable storage when transactions are committed (using
+the <a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> flag to increase performance at the cost of
+sacrificing transactional durability), Berkeley DB recovery will only be able
+to restore the system to the state of the last commit found on stable
+storage. In this case, information may have been lost (for example,
+changes made on the part of a committed transaction may not appear in
+a database).
+<p>Finally, if there is database or log file loss or corruption (for
+example, if a disk drive fails), then catastrophic recovery is
+necessary, and Berkeley DB recovery will only be able to restore the system
+to the state of the last archived log file. In this case, information
+may also have been lost.
+<p>Replicating the database environment extends this model, by adding a
+new component to "stable storage": the client's replicated information.
+If a database environment is replicated, there is no lost information
+in the case of database or log file loss, because the replicated system
+contains a complete set of databases and log records up to the point of
+failure. A database environment that loses a disk drive can have the
+drive replaced, and it can rejoin the replication group as a client.
+<p>Because of this new component of stable storage, specifying
+<a href="../../api_c/env_open.html#DB_TXN_NOSYNC">DB_TXN_NOSYNC</a> in a replicated environment no longer sacrifices
+durability, as long as one or more clients have acknowledged receipt of
+the messages sent by the master. Since network connections are often
+faster than local disk writes, replication becomes a way for
+applications to significantly improve their performance as well as their
+reliability.
+<p>The return status from the <b>send</b> interface specified to the
+<a href="../../api_c/rep_transport.html">DB_ENV-&gt;set_rep_transport</a> function must be set by the application to ensure the
+transactional guarantees the application wants to provide. The effect
+of the <b>send</b> interface returning failure is to flush the local
+database environment's log as necessary to ensure that any information
+critical to database integrity is not lost. Because this flush is an
+expensive operation in terms of database performance, applications will
+want to avoid returning an error from the <b>send</b> interface.
+<p>First, there is no reason for the <b>send</b> interface to ever return
+failure unless the <a href="../../api_c/rep_transport.html#DB_REP_PERMANENT">DB_REP_PERMANENT</a> flag is specified. Messages
+without that flag do not make visible changes to databases, and
+therefore the application's <b>send</b> interface can return success
+to Berkeley DB immediately after it has broadcast the message (or even simply
+copied the message to local memory).
+<p>Further, unless the master's database environment has been configured
+to not synchronously flush the log on transaction commit, there is no
+reason for the <b>send</b> interface to ever return failure, as any
+information critical to database integrity has already been flushed to
+the local log before <b>send</b> was called. Again, the <b>send</b>
+interface should return success to Berkeley DB as soon as possible. However,
+in such cases, in order to avoid potential loss of information after
+the master database environment fails, the master database environment
+should be recovered before holding an election, as only the master
+database environment is guaranteed to have the most up-to-date logs.
+<p>To sum up, the only reason for the <b>send</b> interface to return
+failure is when the master database environment has been configured to
+not synchronously flush the log on transaction commit and the
+<b>send</b> interface was unable to determine that some number of
+clients received the message. How many clients should have received
+the message before the <b>send</b> interface can return success is an
+application choice and, in fact, may depend not so much on a specific
+number of clients reporting success as on success being reported by one
+or more geographically distributed clients.
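+<p>The following sketch shows one way the <b>send</b> interface might
+implement the policy described above. The callback prototype and the
+<b>broadcast_to_group</b> routine (which returns the number of clients
+known to have received the message) are assumptions made for
+illustration:
+<p><blockquote><pre>
+#include &lt;db.h&gt;
+
+/* Hypothetical application routine: returns the number of clients acked. */
+int broadcast_to_group(DB_ENV *, const DBT *, const DBT *, int);
+
+int
+app_send(DB_ENV *dbenv, const DBT *control, const DBT *rec,
+    int eid, u_int32_t flags)
+{
+    int nacked;
+
+    nacked = broadcast_to_group(dbenv, control, rec, eid);
+
+    /* Only DB_REP_PERMANENT messages affect durability. */
+    if (!(flags &amp; DB_REP_PERMANENT))
+        return (0);
+
+    /*
+     * For permanent messages, report failure only when no client is
+     * known to have received the message; Berkeley DB will then flush
+     * the local log as needed.
+     */
+    return (nacked > 0 ? 0 : -1);
+}
+</pre></blockquote>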
+<p>Of course, it is important to ensure that the replicated master and
+client environments are truly independent of each other. For example,
+it does little good for a client to acknowledge receipt of a
+message if both master and clients are on the same power supply, as a
+failure of that power supply can still lose information.
+<p>Finally, the Berkeley DB replication implementation has one other additional
+feature to increase application reliability. Replication in Berkeley DB was
+implemented to perform database updates using a different code path than
+the standard ones. This means operations which manage to crash the
+replication master due to a software bug will not necessarily also crash
+replication clients.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/rep/logonly.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/rep/faq.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/transapp/hotfail.html b/db/docs/ref/transapp/hotfail.html
new file mode 100644
index 000000000..469a826b1
--- /dev/null
+++ b/db/docs/ref/transapp/hotfail.html
@@ -0,0 +1,82 @@
+<!--Id: hotfail.so,v 10.2 2001/07/28 01:45:07 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Hot failover</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Berkeley DB Transactional Data Store Applications</dl></h3></td>
+<td align=right><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Hot failover</h1>
+<p>For some applications, it may be useful to periodically snapshot the
+database environment for use as a hot failover should the primary system
+fail. The following steps can be taken to keep a backup environment in
+close synchrony with an active environment. The active environment is
+entirely unaffected by these procedures, and both read and write
+operations are allowed during all steps described here.
+<p><ol>
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> in the active environment to
+identify all of the active environment's database files, and copy them
+to the backup directory.
+<p>If the database files are stored in a separate directory from the other
+Berkeley DB files, it may be simpler to copy the directory itself instead of
+the individual files (see <a href="../../api_c/env_set_data_dir.html">DB_ENV-&gt;set_data_dir</a> for additional
+information). <b>Note: if any of the database files did not have
+an open <a href="../../api_c/db_create.html">DB</a> handle during the lifetime of the current log files,
+<a href="../../utility/db_archive.html">db_archive</a> will not list them in its output!</b> This is another
+reason it may be simpler to use a separate database file directory and
+copy the entire directory instead of archiving only the files listed by
+<a href="../../utility/db_archive.html">db_archive</a>.
+<p><li>Remove all existing log files from the backup directory.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-l</b> in the active environment to
+identify all of the active environment's log files, and copy them to
+the backup directory.
+<p><li>Run <a href="../../utility/db_recover.html">db_recover</a> <b>-c</b> in the backup directory to
+catastrophically recover the copied environment.
+</ol>
+<p>Steps 2, 3 and 4 may be repeated as often as you like. If Step 1 (the
+initial copy of the database files) is repeated, then Steps 2, 3 and 4
+<b>must</b> be performed at least once in order to ensure a consistent
+database environment snapshot.
+<p>These procedures must be integrated with your other archival procedures,
+of course. If you are periodically removing log files from your active
+environment, you must be sure to copy them to the backup directory
+before removing them from the active directory. Not copying a log file
+to the backup directory and subsequently running recovery with it
+present may leave the backup snapshot of the environment corrupted. A
+simple way to ensure this never happens is to archive the log files in
+Step 2 as you remove them from the backup directory, and in Step 3 to
+move (rather than copy) the inactive log files from your active
+environment into your backup directory.  The following steps describe
+this procedure in more detail:
+<p><ol>
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-s</b> in the active environment to
+identify all of the active environment's database files, and copy them
+to the backup directory.
+<p><li>Archive all existing log files from the backup directory, moving them
+to a backup device such as CD-ROM, alternate disk, or tape.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> (without any option) in the active environment
+to identify all of the log files in the active environment that are no
+longer in use, and <b>move</b> them to the backup directory.
+<p><li>Run <a href="../../utility/db_archive.html">db_archive</a> <b>-l</b> in the active environment to
+identify all of the remaining log files in the active environment, and
+<b>copy</b> the log files to the backup directory.
+<p><li>Run <a href="../../utility/db_recover.html">db_recover</a> <b>-c</b> in the backup directory to
+catastrophically recover the copied environment.
+</ol>
+<p>As before, steps 2, 3, 4 and 5 may be repeated as often as you like.
+If Step 1 (the initial copy of the database files) is repeated, then
+Steps 2 through 5 <b>must</b> be performed at least once in order to
+ensure a consistent database environment snapshot.
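+<p>Applications that prefer to drive this procedure from code rather than
+the <a href="../../utility/db_archive.html">db_archive</a> utility can retrieve the same file lists using the
+<a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a> function.  The following fragment is only a sketch:
+it assumes an already-open <b>dbenv</b> handle and a hypothetical
+<b>copy_file</b> helper (and <b>backup_dir</b> value) supplied by the
+application.
+<p><blockquote><pre>char **list, **p;
+int ret;
+
+/* Equivalent of db_archive -s: the database files to copy. */
+if ((ret = dbenv-&gt;log_archive(dbenv, &amp;list, DB_ARCH_DATA)) != 0)
+	return (ret);
+if (list != NULL) {
+	for (p = list; *p != NULL; ++p)
+		copy_file(*p, backup_dir);	/* Hypothetical helper. */
+	free(list);	/* Allocated by Berkeley DB; free() assumed. */
+}
+
+/* Equivalent of db_archive -l: all current log files to copy. */
+if ((ret = dbenv-&gt;log_archive(dbenv, &amp;list, DB_ARCH_LOG)) != 0)
+	return (ret);
+if (list != NULL) {
+	for (p = list; *p != NULL; ++p)
+		copy_file(*p, backup_dir);
+	free(list);
+}</pre></blockquote>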
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/transapp/recovery.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/transapp/filesys.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/deadlock.html b/db/docs/ref/upgrade.4.0/deadlock.html
new file mode 100644
index 000000000..9fb8dbff5
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/deadlock.html
@@ -0,0 +1,25 @@
+<!--Id: deadlock.so,v 1.1 2001/09/07 16:55:56 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: db_deadlock</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: db_deadlock</h1>
+<p>The <b>-w</b> option to the <a href="../../utility/db_deadlock.html">db_deadlock</a> utility has been
+deprecated. Applications can get the functionality of the <b>-w</b>
+option by using the <b>-t</b> option with an argument of
+<b>.100000</b>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/intro.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/disk.html b/db/docs/ref/upgrade.4.0/disk.html
new file mode 100644
index 000000000..1bb72138e
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/disk.html
@@ -0,0 +1,25 @@
+<!--Id: disk.so,v 1.9 2001/09/18 16:08:16 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: upgrade requirements</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: upgrade requirements</h1>
+<p>The log file format changed in the Berkeley DB 4.0 release. No database
+formats changed in the Berkeley DB 4.0 release.
+<p>For further information on upgrading Berkeley DB installations, see
+<a href="../../ref/upgrade/process.html">Upgrading Berkeley DB installations</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/test/run.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/env.html b/db/docs/ref/upgrade.4.0/env.html
new file mode 100644
index 000000000..d7c6bbaa6
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/env.html
@@ -0,0 +1,81 @@
+<!--Id: env.so,v 1.5 2001/08/07 01:42:35 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: db_env_set_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: db_env_set_XXX</h1>
+<p>The db_env_set_region_init interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. This is an interface change:
+historically, the db_env_set_region_init interface operated on the
+entire Berkeley DB library, not a single environment. The new interface only
+operates on a single <a href="../../api_c/env_create.html">DB_ENV</a> handle (and any handles created in
+the scope of that handle). Applications calling the
+db_env_set_region_init interface should update their calls: calls to
+the historic routine with an argument of 1 (0) are equivalent to calling
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_REGION_INIT">DB_REGION_INIT</a> flag and an
+argument of 1 (0).
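+<p>For example, a sketch of the conversion (assuming an already-open
+<b>dbenv</b> handle; error handling omitted):
+<p><blockquote><pre>/* Historic, library-wide call: */
+(void)db_env_set_region_init(1);
+
+/* Berkeley DB 4.0, per-environment call: */
+(void)dbenv-&gt;set_flags(dbenv, DB_REGION_INIT, 1);</pre></blockquote>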
+<p>The db_env_set_tas_spins interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a> function. This is an interface
+change: historically, the db_env_set_tas_spins interface operated on
+the entire Berkeley DB library, not a single environment. The new interface
+only operates on a single <a href="../../api_c/env_create.html">DB_ENV</a> handle (and any handles created
+in the scope of that handle). Applications calling the
+db_env_set_tas_spins interface should update their calls: calls to the
+historic routine are equivalent to calling <a href="../../api_c/env_set_tas_spins.html">DB_ENV-&gt;set_tas_spins</a> function
+with the same argument. In addition, for consistent behavior, all
+<a href="../../api_c/env_create.html">DB_ENV</a> handles opened by the application should make the same
+configuration call, or the value will need to be entered into the
+environment's <b>DB_CONFIG</b> file.
+<p>Also, three of the standard Berkeley DB debugging interfaces changed in the
+4.0 release. It is quite unlikely that Berkeley DB applications use these
+interfaces.
+<p>The DB_ENV-&gt;set_mutexlocks interface was removed in the 4.0 release
+and replaced with the <a href="../../api_c/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. Applications calling the
+DB_ENV-&gt;set_mutexlocks interface should update their calls: calls
+to the historic routine with an argument of 1 (0) are equivalent to
+calling <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_NOLOCKING">DB_NOLOCKING</a> flag and
+an argument of 1 (0).
+<p>The db_env_set_pageyield interface was removed in the 4.0 release and
+replaced with the <a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag to the
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. This is an interface change:
+historically, the db_env_set_pageyield interface operated on the entire
+Berkeley DB library, not a single environment. The new interface only
+operates on a single <a href="../../api_c/env_create.html">DB_ENV</a> handle (and any handles created in
+the scope of that handle). Applications calling the
+db_env_set_pageyield interface should update their calls: calls to the
+historic routine with an argument of 1 (0) are equivalent to calling
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> with the <a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag and an
+argument of 1 (0). In addition, all <a href="../../api_c/env_create.html">DB_ENV</a> handles opened by
+the application will need to make the same call, or the
+<a href="../../api_c/env_set_flags.html#DB_YIELDCPU">DB_YIELDCPU</a> flag will need to be entered into the environment's
+<b>DB_CONFIG</b> file.
+<p>The db_env_set_panicstate interface was removed in the 4.0 release,
+replaced with the <a href="../../api_c/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a> and <a href="../../api_c/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a>
+flags to the <a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> interface. (The
+<a href="../../api_c/env_set_flags.html#DB_PANIC_ENVIRONMENT">DB_PANIC_ENVIRONMENT</a> flag will cause an environment to panic,
+affecting all threads of control using that environment. The
+<a href="../../api_c/env_set_flags.html#DB_NOPANIC">DB_NOPANIC</a> flag will cause a single <a href="../../api_c/env_create.html">DB_ENV</a> handle to
+ignore the current panic state of the environment.) This is an
+interface change: historically the db_env_set_panicstate interface
+operated on the entire Berkeley DB library, not a single environment.
+Applications calling the db_env_set_panicstate interface should update
+their calls, replacing the historic call with a call to
+<a href="../../api_c/env_set_flags.html">DB_ENV-&gt;set_flags</a> and the appropriate flag, depending on their
+usage of the historic interface.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/intro.html b/db/docs/ref/upgrade.4.0/intro.html
new file mode 100644
index 000000000..6284e4bf5
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/intro.html
@@ -0,0 +1,26 @@
+<!--Id: intro.so,v 1.5 2001/09/28 15:10:29 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: introduction</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<a name="2"><!--meow--></a>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: introduction</h1>
+<p>The following pages describe how to upgrade applications coded against
+the Berkeley DB 3.3 release interfaces to the Berkeley DB 4.0 release interfaces.
+This information does not describe how to upgrade Berkeley DB 1.85 release
+applications.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.3.3/disk.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/java.html b/db/docs/ref/upgrade.4.0/java.html
new file mode 100644
index 000000000..cb51dd8bc
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/java.html
@@ -0,0 +1,32 @@
+<!--Id: java.so,v 1.4 2001/09/25 21:05:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: Java CLASSPATH environment variable</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: Java CLASSPATH environment variable</h1>
+<p>The Berkeley DB Java class files are now packaged as jar files. In the 4.0
+release, the <b>CLASSPATH</b> environment variable must change to
+include at least the <b>db.jar</b> file. It can optionally include
+the <b>dbexamples.jar</b> file if you want to run the examples. For
+example, on UNIX:
+<p><blockquote><pre>export CLASSPATH="/usr/local/BerkeleyDB.4.0/lib/db.jar:/usr/local/BerkeleyDB.4.0/lib/dbexamples.jar"</pre></blockquote>
+<p>For example, on Windows:
+<p><blockquote><pre>set CLASSPATH="D:\db\build_win32\Release\db.jar;D:\db\build_win32\Release\dbexamples.jar"</pre></blockquote>
+<p>For more information on Java configuration, please see
+<a href="../../ref/java/conf.html">Java configuration</a> and
+<a href="../../ref/build_win/intro.html">Building for Win32</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/disk.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/lock.html b/db/docs/ref/upgrade.4.0/lock.html
new file mode 100644
index 000000000..118f2d884
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/lock.html
@@ -0,0 +1,45 @@
+<!--Id: lock.so,v 1.6 2001/09/27 02:25:51 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: lock_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: lock_XXX</h1>
+<p>The C API for the Berkeley DB Locking subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>lock_detect</td><td><a href="../../api_c/lock_detect.html">DB_ENV-&gt;lock_detect</a></td></tr>
+<tr><td>lock_get</td><td><a href="../../api_c/lock_get.html">DB_ENV-&gt;lock_get</a></td></tr>
+<tr><td>lock_id</td><td><a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a></td></tr>
+<tr><td>lock_put</td><td><a href="../../api_c/lock_put.html">DB_ENV-&gt;lock_put</a></td></tr>
+<tr><td>lock_stat</td><td><a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a></td></tr>
+<tr><td>lock_vec</td><td><a href="../../api_c/lock_vec.html">DB_ENV-&gt;lock_vec</a></td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_create.html">DB_ENV</a> handle's method (easily done as the
+first argument to the existing call is the correct handle to use).
+<p>In addition, the <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/lock_stat.html">DB_ENV-&gt;lock_stat</a>.
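+<p>For example, a sketch of the conversion for two of these calls
+(assuming an already-open <b>dbenv</b> handle and a previously acquired
+lock; error handling omitted):
+<p><blockquote><pre>DB_LOCK_STAT *statp;
+
+/* Historic: lock_put(dbenv, &amp;lock); */
+(void)dbenv-&gt;lock_put(dbenv, &amp;lock);
+
+/* DB_ENV-&gt;lock_stat now takes a final flags argument; pass 0. */
+(void)dbenv-&gt;lock_stat(dbenv, &amp;statp, 0);</pre></blockquote>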
+<p>The C++ and Java APIs for the DbLock::put (DbLock.put) method were
+reworked in the 4.0 release to make the lock put interface a method of
+the <a href="../../api_c/env_create.html">DB_ENV</a> handle rather than the DbLock handle. Applications
+calling the DbLock::put or DbLock.put method should update their calls
+to use the enclosing <a href="../../api_c/env_create.html">DB_ENV</a> handle's method (easily done as the
+first argument to the existing call is the correct handle to use).
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/deadlock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/lock_id_free.html b/db/docs/ref/upgrade.4.0/lock_id_free.html
new file mode 100644
index 000000000..fb1b56a3c
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/lock_id_free.html
@@ -0,0 +1,25 @@
+<!--Id: lock_id_free.so,v 1.1 2001/09/25 21:05:24 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;lock_id_free</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;lock_id_free</h1>
+<p>A new locker ID related API, the <a href="../../api_c/lock_id_free.html">DB_ENV-&gt;lock_id_free</a> function, was added in
+the Berkeley DB 4.0 release.  Applications using the <a href="../../api_c/lock_id.html">DB_ENV-&gt;lock_id</a> function to
+allocate locker IDs may want to update their code to free each locker ID
+when it is no longer needed.
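+<p>A sketch of the suggested usage (assuming an already-open <b>dbenv</b>
+handle; error handling abbreviated):
+<p><blockquote><pre>u_int32_t locker;
+int ret;
+
+if ((ret = dbenv-&gt;lock_id(dbenv, &amp;locker)) != 0)
+	return (ret);
+/* ... use the locker ID with DB_ENV-&gt;lock_get or DB_ENV-&gt;lock_vec ... */
+if ((ret = dbenv-&gt;lock_id_free(dbenv, locker)) != 0)
+	return (ret);</pre></blockquote>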
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/java.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/log.html b/db/docs/ref/upgrade.4.0/log.html
new file mode 100644
index 000000000..dfae570ab
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/log.html
@@ -0,0 +1,55 @@
+<!--Id: log.so,v 1.3 2001/09/28 15:09:42 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: log_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: log_XXX</h1>
+<p>The C API for the Berkeley DB Logging subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>log_archive</td><td><a href="../../api_c/log_archive.html">DB_ENV-&gt;log_archive</a></td></tr>
+<tr><td>log_file</td><td><a href="../../api_c/log_file.html">DB_ENV-&gt;log_file</a></td></tr>
+<tr><td>log_flush</td><td><a href="../../api_c/log_flush.html">DB_ENV-&gt;log_flush</a></td></tr>
+<tr><td>log_get</td><td><a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a>, <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a>, <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a></td></tr>
+<tr><td>log_put</td><td><a href="../../api_c/log_put.html">DB_ENV-&gt;log_put</a></td></tr>
+<tr><td>log_register</td><td><a href="../../api_c/log_register.html">DB_ENV-&gt;log_register</a></td></tr>
+<tr><td>log_stat</td><td><a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a></td></tr>
+<tr><td>log_unregister</td><td><a href="../../api_c/log_unregister.html">DB_ENV-&gt;log_unregister</a></td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_create.html">DB_ENV</a> handle's method (in all cases other
+than the log_get call, this is easily done as the first argument to the
+existing call is the correct handle to use).
+<p>Application calls to the historic log_get interface must be replaced
+with the creation of a log file cursor (a <a href="../../api_c/log_cursor.html">DB_LOGC</a> object), using
+the <a href="../../api_c/log_cursor.html">DB_ENV-&gt;log_cursor</a> function, calls to the <a href="../../api_c/logc_get.html">DB_LOGC-&gt;get</a> function to retrieve log
+records and calls to the <a href="../../api_c/logc_close.html">DB_LOGC-&gt;close</a> function to destroy the cursor. It
+may also be possible to simplify some applications. In previous
+releases of Berkeley DB, the DB_CURRENT, DB_NEXT, and DB_PREV flags to the
+log_get function could not be used by a free-threaded <a href="../../api_c/env_create.html">DB_ENV</a>
+handle. If their <a href="../../api_c/env_create.html">DB_ENV</a> handle was free-threaded, applications
+had to create an additional, unique environment handle by separately
+calling <a href="../../api_c/env_open.html">DB_ENV-&gt;open</a> without specifying <a href="../../api_c/env_open.html#DB_THREAD">DB_THREAD</a>. This
+is no longer an issue in the log cursor interface, and applications may
+be able to remove the now unnecessary creation of the additional
+<a href="../../api_c/env_create.html">DB_ENV</a> object.
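+<p>A sketch of the replacement for a historic log_get traversal loop
+(assuming an already-open <b>dbenv</b> handle; error handling
+abbreviated):
+<p><blockquote><pre>DB_LOGC *logc;
+DB_LSN lsn;
+DBT rec;
+int ret;
+
+memset(&amp;rec, 0, sizeof(rec));
+if ((ret = dbenv-&gt;log_cursor(dbenv, &amp;logc, 0)) != 0)
+	return (ret);
+for (ret = logc-&gt;get(logc, &amp;lsn, &amp;rec, DB_FIRST);
+    ret == 0;
+    ret = logc-&gt;get(logc, &amp;lsn, &amp;rec, DB_NEXT)) {
+	/* Process rec.data / rec.size for the record at lsn. */
+}
+(void)logc-&gt;close(logc, 0);
+return (ret == DB_NOTFOUND ? 0 : ret);</pre></blockquote>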
+<p>Finally, the <a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a> call has been changed in the 4.0 release
+to take a flags argument. To leave their historic behavior unchanged,
+applications should add a final argument of 0 to any calls made to
+<a href="../../api_c/log_stat.html">DB_ENV-&gt;log_stat</a>.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/lock.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/mp.html b/db/docs/ref/upgrade.4.0/mp.html
new file mode 100644
index 000000000..c99a47863
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/mp.html
@@ -0,0 +1,65 @@
+<!--Id: mp.so,v 1.2 2001/09/27 02:25:52 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: memp_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: memp_XXX</h1>
+<p>The C API for the Berkeley DB Memory Pool subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>memp_register</td><td><a href="../../api_c/memp_register.html">DB_ENV-&gt;memp_register</a></td></tr>
+<tr><td>memp_stat</td><td><a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a></td></tr>
+<tr><td>memp_sync</td><td><a href="../../api_c/memp_sync.html">DB_ENV-&gt;memp_sync</a></td></tr>
+<tr><td>memp_trickle</td><td><a href="../../api_c/memp_trickle.html">DB_ENV-&gt;memp_trickle</a></td></tr>
+<tr><td>memp_fopen</td><td><a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: ftype</td><td><a href="../../api_c/memp_set_ftype.html">DB_MPOOLFILE-&gt;set_ftype</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: pgcookie</td><td><a href="../../api_c/memp_set_pgcookie.html">DB_MPOOLFILE-&gt;set_pgcookie</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: fileid</td><td><a href="../../api_c/memp_set_fileid.html">DB_MPOOLFILE-&gt;set_fileid</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: lsn_offset</td><td><a href="../../api_c/memp_set_lsn_offset.html">DB_MPOOLFILE-&gt;set_lsn_offset</a></td></tr>
+<tr><td>DB_MPOOL_FINFO: clear_len</td><td><a href="../../api_c/memp_set_clear_len.html">DB_MPOOLFILE-&gt;set_clear_len</a></td></tr>
+<tr><td>memp_fopen</td><td><a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a></td></tr>
+<tr><td>memp_fclose</td><td><a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a></td></tr>
+<tr><td>memp_fput</td><td><a href="../../api_c/memp_fput.html">DB_MPOOLFILE-&gt;put</a></td></tr>
+<tr><td>memp_fset</td><td><a href="../../api_c/memp_fset.html">DB_MPOOLFILE-&gt;set</a></td></tr>
+<tr><td>memp_fsync</td><td><a href="../../api_c/memp_fsync.html">DB_MPOOLFILE-&gt;sync</a></td></tr>
+</table>
+<p>Applications calling any of the memp_register, memp_stat, memp_sync or
+memp_trickle interfaces should update those calls to use the enclosing
+<a href="../../api_c/env_create.html">DB_ENV</a> handle's method (easily done as the first argument to the
+existing call is the correct <a href="../../api_c/env_create.html">DB_ENV</a> handle).
+<p>In addition, the <a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/memp_stat.html">DB_ENV-&gt;memp_stat</a>.
+<p>Applications calling the memp_fopen interface should update those calls
+as follows: First, acquire a <a href="../../api_c/memp_fcreate.html">DB_MPOOLFILE</a> handle using the
+<a href="../../api_c/memp_fcreate.html">DB_ENV-&gt;memp_fcreate</a> function. Second, if the DB_MPOOL_FINFO structure
+reference passed to the memp_fopen interface was non-NULL, call the
+<a href="../../api_c/memp_fcreate.html">DB_MPOOLFILE</a> method corresponding to each initialized field in
+the DB_MPOOL_FINFO structure. Third, call the <a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> function
+to open the underlying file. If the <a href="../../api_c/memp_fopen.html">DB_MPOOLFILE-&gt;open</a> call fails, the
+<a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> function must be called to destroy the allocated
+handle.
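+<p>A sketch of this memp_fopen conversion (assuming an already-open
+<b>dbenv</b> handle and hypothetical <b>path</b>, <b>pagesize</b>, and
+<b>clear_len</b> values; error handling abbreviated):
+<p><blockquote><pre>DB_MPOOLFILE *mpf;
+int ret;
+
+if ((ret = dbenv-&gt;memp_fcreate(dbenv, &amp;mpf, 0)) != 0)
+	return (ret);
+/* One call per initialized DB_MPOOL_FINFO field, for example: */
+if ((ret = mpf-&gt;set_clear_len(mpf, clear_len)) != 0 ||
+    (ret = mpf-&gt;open(mpf, path, 0, 0, pagesize)) != 0) {
+	(void)mpf-&gt;close(mpf, 0);	/* Destroy the handle on failure. */
+	return (ret);
+}</pre></blockquote>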
+<p>Applications calling the memp_fopen, memp_fclose, memp_fput, memp_fset,
+or memp_fsync interfaces should update those calls to use the enclosing
+<a href="../../api_c/memp_fcreate.html">DB_MPOOLFILE</a> handle's method. Again, this is easily done as the
+first argument to the existing call is the correct <a href="../../api_c/memp_fcreate.html">DB_MPOOLFILE</a>
+handle. With one exception, the calling conventions of the old and new
+interfaces are identical; the one exception is the <a href="../../api_c/memp_fclose.html">DB_MPOOLFILE-&gt;close</a> function,
+which requires an additional flag parameter that should be set to 0.
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/log.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/txn.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/rpc.html b/db/docs/ref/upgrade.4.0/rpc.html
new file mode 100644
index 000000000..b273750fa
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/rpc.html
@@ -0,0 +1,26 @@
+<!--Id: rpc.so,v 1.5 2001/07/27 23:18:47 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;set_server</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;set_server</h1>
+<p>The DB_ENV-&gt;set_server interface has been replaced with the
+<a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> function. The DB_ENV-&gt;set_server interface
+can be converted to the <a href="../../api_c/env_set_rpc_server.html">DB_ENV-&gt;set_rpc_server</a> function by changing
+the name and specifying NULL for the added argument, which is second in
+the argument list.
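+<p>For example (a sketch only; <b>host</b>, the timeouts, and <b>flags</b>
+are whatever values the application already passed to the historic call):
+<p><blockquote><pre>/* Historic: dbenv-&gt;set_server(dbenv, host, cl_timeout, sv_timeout, flags); */
+(void)dbenv-&gt;set_rpc_server(dbenv, NULL, host, cl_timeout, sv_timeout, flags);</pre></blockquote>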
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/set_lk_max.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/set_lk_max.html b/db/docs/ref/upgrade.4.0/set_lk_max.html
new file mode 100644
index 000000000..020b379e7
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/set_lk_max.html
@@ -0,0 +1,26 @@
+<!--Id: set_lk_max.so,v 1.3 2001/09/25 21:05:25 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: DB_ENV-&gt;set_lk_max</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: DB_ENV-&gt;set_lk_max</h1>
+<p>The DB_ENV-&gt;set_lk_max interface has been deprecated in favor of
+the <a href="../../api_c/env_set_lk_max_locks.html">DB_ENV-&gt;set_lk_max_locks</a>, <a href="../../api_c/env_set_lk_max_lockers.html">DB_ENV-&gt;set_lk_max_lockers</a>,
+and <a href="../../api_c/env_set_lk_max_objects.html">DB_ENV-&gt;set_lk_max_objects</a> functions. The DB_ENV-&gt;set_lk_max
+interface continues to be available, but is no longer documented and
+is expected to be removed in a future release.
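+<p>For example, a sketch of the conversion (the value 5000 is only a
+placeholder for whatever maximum the application previously configured):
+<p><blockquote><pre>/* Historic: dbenv-&gt;set_lk_max(dbenv, 5000); */
+(void)dbenv-&gt;set_lk_max_locks(dbenv, 5000);
+(void)dbenv-&gt;set_lk_max_lockers(dbenv, 5000);
+(void)dbenv-&gt;set_lk_max_objects(dbenv, 5000);</pre></blockquote>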
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/rpc.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/lock_id_free.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/toc.html b/db/docs/ref/upgrade.4.0/toc.html
new file mode 100644
index 000000000..3c579f944
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/toc.html
@@ -0,0 +1,35 @@
+<!--Id: toc.so,v 1.13 2001/09/25 21:05:25 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Upgrading Berkeley DB 3.3.X applications to Berkeley DB 4.0</h1>
+<p><ol>
+<p><li><a href="intro.html">Release 4.0: introduction</a>
+<li><a href="deadlock.html">Release 4.0: db_deadlock</a>
+<li><a href="lock.html">Release 4.0: lock_XXX</a>
+<li><a href="log.html">Release 4.0: log_XXX</a>
+<li><a href="mp.html">Release 4.0: memp_XXX</a>
+<li><a href="txn.html">Release 4.0: txn_XXX</a>
+<li><a href="env.html">Release 4.0: db_env_set_XXX</a>
+<li><a href="rpc.html">Release 4.0: DB_ENV-&gt;set_server</a>
+<li><a href="set_lk_max.html">Release 4.0: DB_ENV-&gt;set_lk_max</a>
+<li><a href="lock_id_free.html">Release 4.0: DB_ENV-&gt;lock_id_free</a>
+<li><a href="java.html">Release 4.0: Java CLASSPATH environment variable</a>
+<li><a href="disk.html">Release 4.0: upgrade requirements</a>
+</ol>
+<table width="100%"><tr><td><br></td><td align=right><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/docs/ref/upgrade.4.0/txn.html b/db/docs/ref/upgrade.4.0/txn.html
new file mode 100644
index 000000000..23e15227a
--- /dev/null
+++ b/db/docs/ref/upgrade.4.0/txn.html
@@ -0,0 +1,46 @@
+<!--Id: txn.so,v 1.5 2001/09/27 02:25:52 bostic Exp -->
+<!--Copyright 1997-2001 by Sleepycat Software, Inc.-->
+<!--All rights reserved.-->
+<html>
+<head>
+<title>Berkeley DB Reference Guide: Release 4.0: txn_XXX</title>
+<meta name="description" content="Berkeley DB: An embedded database programmatic toolkit.">
+<meta name="keywords" content="embedded,database,programmatic,toolkit,b+tree,btree,hash,hashing,transaction,transactions,locking,logging,access method,access methods,java,C,C++">
+</head>
+<body bgcolor=white>
+<table width="100%"><tr valign=top>
+<td><h3><dl><dt>Berkeley DB Reference Guide:<dd>Upgrading Berkeley DB Applications</dl></h3></td>
+<td align=right><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p>
+<h1 align=center>Release 4.0: txn_XXX</h1>
+<p>The C API for the Berkeley DB Transaction subsystem was reworked in the 4.0
+release as follows:
+<p><table border=1 align=center>
+<tr><th>Historic functional interface</th><th>Berkeley DB 4.X method</th></tr>
+<tr><td>txn_abort</td><td><a href="../../api_c/txn_abort.html">DB_TXN-&gt;abort</a></td></tr>
+<tr><td>txn_begin</td><td><a href="../../api_c/txn_begin.html">DB_ENV-&gt;txn_begin</a></td></tr>
+<tr><td>txn_checkpoint</td><td><a href="../../api_c/txn_checkpoint.html">DB_ENV-&gt;txn_checkpoint</a></td></tr>
+<tr><td>txn_commit</td><td><a href="../../api_c/txn_commit.html">DB_TXN-&gt;commit</a></td></tr>
+<tr><td>txn_discard</td><td><a href="../../api_c/txn_discard.html">DB_TXN-&gt;discard</a></td></tr>
+<tr><td>txn_id</td><td><a href="../../api_c/txn_id.html">DB_TXN-&gt;id</a></td></tr>
+<tr><td>txn_prepare</td><td><a href="../../api_c/txn_prepare.html">DB_TXN-&gt;prepare</a></td></tr>
+<tr><td>txn_recover</td><td><a href="../../api_c/txn_recover.html">DB_ENV-&gt;txn_recover</a></td></tr>
+<tr><td>txn_stat</td><td><a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a></td></tr>
+</table>
+<p>Applications calling any of these functions should update their calls
+to use the enclosing <a href="../../api_c/env_create.html">DB_ENV</a> or <a href="../../api_c/txn_begin.html">DB_TXN</a> handle's method
+(easily done as the first argument to the existing call is the correct
+handle to use).
+<p>As a special case, since applications might potentially have many calls
+to the txn_abort, txn_begin and txn_commit functions, those interfaces
+continue to work unchanged in the Berkeley DB 4.0 release.
+<p>In addition, the <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a> call has been changed in the 4.0
+release to take a flags argument. To leave their historic behavior
+unchanged, applications should add a final argument of 0 to any calls
+made to <a href="../../api_c/txn_stat.html">DB_ENV-&gt;txn_stat</a>.
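+<p>For example, a sketch of the txn_stat conversion (assuming an
+already-open <b>dbenv</b> handle and the default memory allocator; error
+handling omitted):
+<p><blockquote><pre>DB_TXN_STAT *statp;
+
+/* Historic: txn_stat(dbenv, &amp;statp); */
+if (dbenv-&gt;txn_stat(dbenv, &amp;statp, 0) == 0) {
+	printf("active transactions: %lu\n", (unsigned long)statp-&gt;st_nactive);
+	free(statp);	/* Stat memory is allocated by Berkeley DB. */
+}</pre></blockquote>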
+<table width="100%"><tr><td><br></td><td align=right><a href="../../ref/upgrade.4.0/mp.html"><img src="../../images/prev.gif" alt="Prev"></a><a href="../../reftoc.html"><img src="../../images/ref.gif" alt="Ref"></a><a href="../../ref/upgrade.4.0/env.html"><img src="../../images/next.gif" alt="Next"></a>
+</td></tr></table>
+<p><font size=1><a href="http://www.sleepycat.com">Copyright Sleepycat Software</a></font>
+</body>
+</html>
diff --git a/db/examples_c/bench_001.c b/db/examples_c/bench_001.c
new file mode 100644
index 000000000..a8faccc45
--- /dev/null
+++ b/db/examples_c/bench_001.c
@@ -0,0 +1,341 @@
+/*-
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+/*
+ * bench_001 - time bulk fetch interface.
+ * Without -R, builds a btree according to the arguments.
+ * With -R runs and times bulk fetches. If -d is specified
+ * during reads the DB_MULTIPLE interface is used
+ * otherwise the DB_MULTIPLE_KEY interface is used.
+ *
+ * ARGUMENTS:
+ * -c cachesize [1000 * pagesize]
+ * -d number of duplicates [none]
+ * -E don't use environment
+ * -I Just initialize the environment
+ * -i number of read iterations [1000000]
+ * -l length of data item [20]
+ * -n number of keys [1000000]
+ * -p pagesize [65536]
+ * -R perform read test.
+ * -T incorporate transactions.
+ *
+ * COMPILE:
+ * cc -I /usr/local/BerkeleyDB/include \
+ * -o bench_001 -O2 bench_001.c /usr/local/BerkeleyDB/lib/libdb.so
+ */
+#include <sys/types.h>
+
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#define DATABASE "bench_001.db"
+
+int main(int, char *[]);
+void usage(void);
+
+const char
+ *progname = "bench_001"; /* Program name. */
+/*
+ * db_init --
+ * Initialize the environment.
+ */
+DB_ENV *
+db_init(home, prefix, cachesize, txn)
+ char *home, *prefix;
+ int cachesize, txn;
+{
+ DB_ENV *dbenv;
+ int flags, ret;
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ dbenv->err(dbenv, ret, "db_env_create");
+ return (NULL);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ (void)dbenv->set_cachesize(dbenv, 0,
+ cachesize == 0 ? 50 * 1024 * 1024 : (u_int32_t)cachesize, 0);
+
+ flags = DB_CREATE | DB_INIT_MPOOL;
+ if (txn)
+ flags |= DB_INIT_TXN | DB_INIT_LOCK;
+ if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) {
+ dbenv->err(dbenv, ret, "DB_ENV->open: %s", home);
+ (void)dbenv->close(dbenv, 0);
+ return (NULL);
+ }
+ return (dbenv);
+}
+
+/*
+ * get -- loop getting batches of records.
+ *
+ */
+int
+get(dbp, txn, datalen, num, dups, iter, countp)
+ DB *dbp;
+ int txn, datalen, num, dups, iter, *countp;
+{
+ DBC *dbcp;
+ DBT key, data;
+ DB_TXN *txnp;
+ u_int32_t len, klen;
+ int count, flags, i, j, ret;
+ void *pointer, *dp, *kp;
+
+ memset(&key, 0, sizeof(key));
+ key.data = &j;
+ key.size = sizeof(j);
+ memset(&data, 0, sizeof(data));
+ data.flags = DB_DBT_USERMEM;
+ data.data = malloc(datalen*1024*1024);
+ data.ulen = data.size = datalen*1024*1024;
+ count = 0;
+ flags = DB_SET;
+ if (!dups)
+ flags |= DB_MULTIPLE_KEY;
+ else
+ flags |= DB_MULTIPLE;
+ for (i = 0; i < iter; i++) {
+ txnp = NULL;
+ if (txn)
+ dbp->dbenv->txn_begin(dbp->dbenv, NULL, &txnp, 0);
+ dbp->cursor(dbp, txnp, &dbcp, 0);
+
+ j = random() % num;
+ switch (ret = dbcp->c_get(dbcp, &key, &data, flags)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbcp->dbp, ret, "DBC->c_get");
+ return (ret);
+ }
+ DB_MULTIPLE_INIT(pointer, &data);
+ if (dups)
+ while (pointer != NULL) {
+ DB_MULTIPLE_NEXT(pointer, &data, dp, len);
+ if (dp != NULL)
+ count++;
+ }
+ else
+ while (pointer != NULL) {
+ DB_MULTIPLE_KEY_NEXT(pointer,
+ &data, kp, klen, dp, len);
+ if (kp != NULL)
+ count++;
+ }
+ dbcp->c_close(dbcp);
+ if (txn)
+ txnp->commit(txnp, 0);
+ }
+
+ *countp = count;
+ return (0);
+}
+
+/*
+ * fill - fill a db
+ */
+int
+fill(dbp, datalen, num, dups)
+ DB *dbp;
+ int datalen, num, dups;
+{
+ DBT key, data;
+ struct data {
+ int id;
+ char str[1];
+ } *data_val;
+ int count, i, ret;
+	/*
+	 * Insert records into the database.  The key is the loop index
+	 * and the data is a fixed-pattern string preceded by a duplicate
+	 * counter.
+	 */
+ count = 0;
+ memset(&key, 0, sizeof(DBT));
+ memset(&data, 0, sizeof(DBT));
+ key.data = &i;
+ key.size = sizeof(i);
+ data.data = data_val = (struct data *) malloc(datalen);
+ memcpy(data_val->str, "0123456789012345678901234567890123456789",
+ datalen - sizeof (data_val->id));
+ data.size = datalen;
+ data.flags = DB_DBT_USERMEM;
+
+ for (i = 0; i < num; i++) {
+ data_val->id = 0;
+ do {
+ switch (ret =
+ dbp->put(dbp, NULL, &key, &data, 0)) {
+ case 0:
+ count++;
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ return (ret);
+ break;
+ }
+ } while (++data_val->id < dups);
+ }
+ printf("%d\n", count);
+ return (0);
+}
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB *dbp;
+ DB_ENV *dbenv;
+ struct timeval start_time, end_time;
+ double secs;
+ int ch, count, env, ret;
+ int cache, datalen, dups, init, iter, num, pagesize, read, txn;
+
+ datalen = 20;
+ iter = num = 1000000;
+ pagesize = 65536;
+ cache = 1000 * pagesize;
+ env = 1;
+ read = 0;
+ dups = 0;
+ init = 0;
+ txn = 0;
+ while ((ch = getopt(argc, argv, "c:d:EIi:l:n:p:RT")) != EOF)
+ switch (ch) {
+ case 'c':
+ cache = atoi(optarg);
+ break;
+ case 'd':
+ dups = atoi(optarg);
+ break;
+ case 'E':
+ env = 0;
+ break;
+ case 'I':
+ init = 1;
+ break;
+ case 'i':
+ iter = atoi(optarg);
+ break;
+ case 'l':
+ datalen = atoi(optarg);
+ break;
+ case 'n':
+ num = atoi(optarg);
+ break;
+ case 'p':
+ pagesize = atoi(optarg);
+ break;
+ case 'R':
+ read = 1;
+ break;
+ case 'T':
+ txn = 1;
+ break;
+ case '?':
+ default:
+ usage();
+ }
+ argc -= optind;
+ argv += optind;
+
+ /* Remove the previous database. */
+ if (!read) {
+ if (env)
+ system("rm -rf BENCH_001; mkdir BENCH_001");
+ else
+ (void)unlink(DATABASE);
+ }
+
+ dbenv = NULL;
+ if (env == 1 &&
+ (dbenv = db_init("BENCH_001", "bench_001", cache, txn)) == NULL)
+ return (-1);
+ if (init)
+ exit(0);
+ /* Create and initialize database object, open the database. */
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr,
+ "%s: db_create: %s\n", progname, db_strerror(ret));
+ exit(EXIT_FAILURE);
+ }
+ dbp->set_errfile(dbp, stderr);
+ dbp->set_errpfx(dbp, progname);
+ if ((ret = dbp->set_pagesize(dbp, pagesize)) != 0) {
+ dbp->err(dbp, ret, "set_pagesize");
+ goto err1;
+ }
+ if (dups && (ret = dbp->set_flags(dbp, DB_DUP)) != 0) {
+ dbp->err(dbp, ret, "set_flags");
+ goto err1;
+ }
+
+ if (env == 0 && (ret = dbp->set_cachesize(dbp, 0, cache, 0)) != 0) {
+ dbp->err(dbp, ret, "set_cachesize");
+ goto err1;
+ }
+
+ if ((ret = dbp->set_flags(dbp, DB_DUP)) != 0) {
+ dbp->err(dbp, ret, "set_flags");
+ goto err1;
+ }
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_BTREE, DB_CREATE, 0664)) != 0) {
+ dbp->err(dbp, ret, "%s: open", DATABASE);
+ goto err1;
+ }
+
+ if (read) {
+ /* If no environment, fill the cache. */
+ if (!env && (ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+
+ /* Time the get loop. */
+ gettimeofday(&start_time, NULL);
+ if ((ret =
+ get(dbp, txn, datalen, num, dups, iter, &count)) != 0)
+ goto err1;
+ gettimeofday(&end_time, NULL);
+ secs =
+ (((double)end_time.tv_sec * 1000000 + end_time.tv_usec) -
+ ((double)start_time.tv_sec * 1000000 + start_time.tv_usec))
+ / 1000000;
+ printf("%d records read using %d batches in %.2f seconds: ",
+ count, iter, secs);
+ printf("%.0f records/second\n", (double)count / secs);
+
+ } else if ((ret = fill(dbp, datalen, num, dups)) != 0)
+ goto err1;
+
+ /* Close everything down. */
+ if ((ret = dbp->close(dbp, read ? DB_NOSYNC : 0)) != 0) {
+ fprintf(stderr,
+ "%s: DB->close: %s\n", progname, db_strerror(ret));
+ return (1);
+ }
+ return (0);
+
+err1: (void)dbp->close(dbp, 0);
+ return (1);
+}
+
+void
+usage()
+{
+ (void)fprintf(stderr, "usage: %s %s\n\t%s\n",
+ progname, "[-EIRT] [-c cachesize] [-d dups]",
+ "[-i iterations] [-l datalen] [-n keys] [-p pagesize]");
+ exit(EXIT_FAILURE);
+}
diff --git a/db/examples_c/ex_repquote/ex_repquote.h b/db/examples_c/ex_repquote/ex_repquote.h
new file mode 100644
index 000000000..eaf3423cc
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_repquote.h
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_repquote.h,v 1.19 2001/10/13 13:13:16 bostic Exp
+ */
+
+#ifndef _EX_REPQUOTE_H_
+#define _EX_REPQUOTE_H_
+
+#define SELF_EID 1
+
+typedef struct {
+ char *host; /* Host name. */
+ u_int32_t port; /* Port on which to connect to this site. */
+} site_t;
+
+/* Globals */
+extern int master_eid;
+extern char *myaddr;
+
+struct __member; typedef struct __member member_t;
+struct __machtab; typedef struct __machtab machtab_t;
+
+/* Arguments for the connect_all thread. */
+typedef struct {
+ DB_ENV *dbenv;
+ char *progname;
+ char *home;
+ machtab_t *machtab;
+ site_t *sites;
+ int nsites;
+} all_args;
+
+/* Arguments for the connect_loop thread. */
+typedef struct {
+ DB_ENV *dbenv;
+ char * home;
+ char * progname;
+ machtab_t *machtab;
+ int port;
+} connect_args;
+
+#define CACHESIZE (10 * 1024 * 1024)
+#define DATABASE "quote.db"
+#define SLEEPTIME 3
+
+void *connect_all __P((void *args));
+void *connect_thread __P((void *args));
+int doclient __P((DB_ENV *, char *, machtab_t *));
+int domaster __P((DB_ENV *, char *));
+int get_accepted_socket __P((char *, int));
+int get_connected_socket __P((machtab_t *, char *, char *, int, int *, int *));
+int get_next_message __P((int, DBT *, DBT *));
+int listen_socket_init __P((char *, int));
+int listen_socket_accept __P((machtab_t *, char *, int, int *));
+int machtab_getinfo __P((machtab_t *, int, u_int32_t *, int *));
+int machtab_init __P((machtab_t **, int, int));
+void machtab_parm __P((machtab_t *, int *, int *, u_int32_t *, u_int32_t *));
+int machtab_rem __P((machtab_t *, int, int));
+int quote_send __P((DB_ENV *, void *, const DBT *, DBT *, u_int32_t, int));
+
+#define COMPQUIET(x,y) x = (y)
+
+#endif /* !_EX_REPQUOTE_H_ */
diff --git a/db/examples_c/ex_repquote/ex_rq_client.c b/db/examples_c/ex_repquote/ex_rq_client.c
new file mode 100644
index 000000000..440d3766a
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_rq_client.c
@@ -0,0 +1,285 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_rq_client.c,v 1.21 2001/10/09 14:45:43 margo Exp
+ */
+
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *check_loop __P((void *));
+static void *display_loop __P((void *));
+static int print_stocks __P((DBC *));
+
+typedef struct {
+ char *progname;
+ DB_ENV *dbenv;
+} disploop_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} checkloop_args;
+
+int
+doclient(dbenv, progname, machtab)
+ DB_ENV *dbenv;
+ char *progname;
+ machtab_t *machtab;
+{
+ checkloop_args cargs;
+ disploop_args dargs;
+ pthread_t check_thr, disp_thr;
+ void *cstatus, *dstatus;
+ int rval, s;
+
+ rval = EXIT_SUCCESS;
+ s = -1;
+
+ memset(&dargs, 0, sizeof(dargs));
+ dstatus = (void *)EXIT_FAILURE;
+
+ dargs.progname = progname;
+ dargs.dbenv = dbenv;
+ if (pthread_create(&disp_thr, NULL, display_loop, (void *)&dargs)) {
+ fprintf(stderr, "%s: display_loop pthread_create failed: %s\n",
+ progname, strerror(errno));
+ goto err;
+ }
+
+ cargs.dbenv = dbenv;
+ cargs.machtab = machtab;
+ if (pthread_create(&check_thr, NULL, check_loop, (void *)&cargs)) {
+ fprintf(stderr, "%s: check_thread pthread_create failed: %s\n",
+ progname, strerror(errno));
+ goto err;
+ }
+ if (pthread_join(disp_thr, &dstatus) ||
+ pthread_join(check_thr, &cstatus)) {
+ fprintf(stderr, "%s: pthread_join failed: %s\n",
+ progname, strerror(errno));
+ goto err;
+ }
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+ return (rval);
+}
+
+/*
+ * Our only job is to check that the master is valid and if it's not
+ * for an extended period, to trigger an election. We do two phases.
+ * If we do not have a master, first we send out a request for a master
+ * to identify itself (that would be a call to rep_start). If that fails,
+ * we trigger an election.
+ */
+static void *
+check_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT dbt;
+ checkloop_args *cargs;
+ int count, n, is_me, pri;
+ machtab_t *machtab;
+ u_int32_t check, elect;
+
+ cargs = (checkloop_args *)args;
+ dbenv = cargs->dbenv;
+ machtab = cargs->machtab;
+
+#define IDLE_INTERVAL 1
+
+ count = 0;
+ while (1) {
+ sleep(IDLE_INTERVAL);
+
+ /* If we become master, shut this loop off. */
+ if (master_eid == SELF_EID)
+ break;
+
+ if (master_eid == DB_INVALID_EID)
+ count++;
+ else
+ count = 0;
+
+ if (count <= 1)
+ continue;
+
+ /*
+ * First, call rep_start (possibly again) to see if we can
+ * find a master.
+ */
+
+ memset(&dbt, 0, sizeof(dbt));
+ dbt.data = myaddr;
+ dbt.size = strlen(myaddr) + 1;
+ (void)dbenv->rep_start(dbenv, &dbt, DB_REP_CLIENT);
+ sleep(IDLE_INTERVAL);
+
+ if (master_eid != DB_INVALID_EID) {
+ count = 0;
+ continue;
+ }
+
+ /* Now call for an election */
+ machtab_parm(machtab, &n, &pri, &check, &elect);
+ if (dbenv->rep_elect(dbenv,
+ n, pri, check, elect, &master_eid, &is_me) != 0)
+ continue;
+
+ /* If I'm the new master, I can stop checking for masters. */
+ if (is_me) {
+ master_eid = SELF_EID;
+ break;
+ }
+ }
+
+ return ((void *)EXIT_SUCCESS);
+}
+
+static void *
+display_loop(args)
+ void *args;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DBC *dbc;
+ char *progname;
+ disploop_args *dargs;
+ int ret, rval;
+
+ dargs = (disploop_args *)args;
+ progname = dargs->progname;
+ dbenv = dargs->dbenv;
+
+ dbc = NULL;
+ dbp = NULL;
+
+ for (;;) {
+ /* If we become master, shut this loop off. */
+ if (master_eid == SELF_EID)
+ break;
+
+ if (dbp == NULL) {
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: db_create: %s\n", progname,
+ db_strerror(ret));
+ return ((void *)EXIT_FAILURE);
+ }
+
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_BTREE, DB_RDONLY, 0)) != 0) {
+ if (ret == ENOENT) {
+ printf(
+ "No stock database yet available.\n");
+ if ((ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr,
+					    "%s: DB->close: %s\n",
+ progname,
+ db_strerror(ret));
+ goto err;
+ }
+ dbp = NULL;
+ sleep(SLEEPTIME);
+ continue;
+ }
+ fprintf(stderr, "%s: DB->open: %s\n", progname,
+ db_strerror(ret));
+ goto err;
+ }
+ }
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0) {
+ fprintf(stderr, "%s: DB->cursor: %s\n", progname,
+ db_strerror(ret));
+ goto err;
+ }
+
+ if ((ret = print_stocks(dbc)) != 0) {
+ fprintf(stderr, "%s: database traversal failed: %s\n",
+ progname, db_strerror(ret));
+ goto err;
+ }
+
+ if ((ret = dbc->c_close(dbc)) != 0) {
+			fprintf(stderr, "%s: DBC->c_close: %s\n", progname,
+ db_strerror(ret));
+ goto err;
+ }
+
+ dbc = NULL;
+
+ sleep(SLEEPTIME);
+ }
+
+ rval = EXIT_SUCCESS;
+
+ if (0) {
+err: rval = EXIT_FAILURE;
+ }
+
+ if (dbc != NULL && (ret = dbc->c_close(dbc)) != 0) {
+		fprintf(stderr, "%s: DBC->c_close: %s\n", progname,
+ db_strerror(ret));
+ rval = EXIT_FAILURE;
+ }
+
+ if (dbp != NULL && (ret = dbp->close(dbp, 0)) != 0) {
+ fprintf(stderr, "%s: DB->close: %s\n", progname,
+ db_strerror(ret));
+ return ((void *)EXIT_FAILURE);
+ }
+
+ return ((void *)rval);
+}
+
+static int
+print_stocks(dbc)
+ DBC *dbc;
+{
+ DBT key, data;
+#define MAXKEYSIZE 10
+#define MAXDATASIZE 20
+ char keybuf[MAXKEYSIZE + 1], databuf[MAXDATASIZE + 1];
+ int ret;
+ u_int32_t keysize, datasize;
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ printf("\tSymbol\tPrice\n");
+ printf("\t======\t=====\n");
+
+ for (ret = dbc->c_get(dbc, &key, &data, DB_FIRST);
+ ret == 0;
+ ret = dbc->c_get(dbc, &key, &data, DB_NEXT)) {
+ keysize = key.size > MAXKEYSIZE ? MAXKEYSIZE : key.size;
+ memcpy(keybuf, key.data, keysize);
+ keybuf[keysize] = '\0';
+
+ datasize = data.size >= MAXDATASIZE ? MAXDATASIZE : data.size;
+ memcpy(databuf, data.data, datasize);
+ databuf[datasize] = '\0';
+
+ printf("\t%s\t%s\n", keybuf, databuf);
+ }
+ printf("\n");
+ return (ret == DB_NOTFOUND ? 0 : ret);
+}
diff --git a/db/examples_c/ex_repquote/ex_rq_main.c b/db/examples_c/ex_repquote/ex_rq_main.c
new file mode 100644
index 000000000..6ee27d788
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_rq_main.c
@@ -0,0 +1,310 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_rq_main.c,v 1.11 2001/10/09 14:45:43 margo Exp
+ */
+
+#include <sys/types.h>
+#include <pthread.h>
+
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+/*
+ * Process globals (we could put these in the machtab, I suppose).
+ */
+int master_eid;
+char *myaddr;
+
+int env_init __P((char *, char *, DB_ENV **, machtab_t *, u_int32_t));
+static void usage __P((char *));
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ extern char *optarg;
+ extern int optind;
+ DB_ENV *dbenv;
+ DBT local;
+ enum { MASTER, CLIENT, UNKNOWN } whoami;
+ all_args aa;
+ char *c, ch, *home, *progname;
+ connect_args ca;
+ int maxsites, nsites, ret, priority, totalsites;
+ machtab_t *machtab;
+ pthread_t all_thr, conn_thr;
+ site_t site, *sitep, self, *selfp;
+ struct sigaction sigact;
+ void *astatus, *cstatus;
+
+	home = "TESTDIR";
+	dbenv = NULL;
+	machtab = NULL;
+ selfp = sitep = NULL;
+ maxsites = nsites = totalsites = 0;
+ progname = "ex_repquote";
+ ret = 0;
+ whoami = UNKNOWN;
+ priority = 100;
+
+ master_eid = DB_INVALID_EID;
+
+ while ((ch = getopt(argc, argv, "Ch:Mm:n:o:p:")) != EOF)
+ switch (ch) {
+ case 'M':
+ whoami = MASTER;
+ master_eid = SELF_EID;
+ break;
+ case 'C':
+ whoami = CLIENT;
+ break;
+ case 'h':
+ home = optarg;
+ break;
+ case 'm':
+ if ((myaddr = strdup(optarg)) == NULL) {
+ fprintf(stderr,
+ "System error %s\n", strerror(errno));
+ goto err;
+ }
+ self.host = optarg;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ self.port = atoi(c);
+ selfp = &self;
+ break;
+ case 'n':
+ totalsites = atoi(optarg);
+ break;
+ case 'o':
+ site.host = optarg;
+ site.host = strtok(site.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto err;
+ }
+ site.port = atoi(c);
+ if (sitep == NULL || nsites >= maxsites) {
+ maxsites = maxsites == 0 ? 10 : 2 * maxsites;
+ if ((sitep = realloc(sitep,
+ maxsites * sizeof(site_t))) == NULL) {
+ fprintf(stderr, "System error %s\n",
+ strerror(errno));
+ goto err;
+ }
+ }
+ sitep[nsites++] = site;
+ break;
+ case 'p':
+ priority = atoi(optarg);
+ break;
+ case '?':
+ default:
+ usage(progname);
+ }
+
+ /* Error check command line. */
+ if (whoami == UNKNOWN) {
+ fprintf(stderr, "Must specify -M or -C.\n");
+ goto err;
+ }
+
+ if (selfp == NULL)
+ usage(progname);
+
+ if (home == NULL)
+ usage(progname);
+
+ /*
+ * Turn off SIGPIPE so that we don't kill processes when they
+ * happen to lose a connection at the wrong time.
+ */
+ memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = SIG_IGN;
+	if (sigaction(SIGPIPE, &sigact, NULL) != 0) {
+		ret = errno;
+		fprintf(stderr,
+		    "Unable to turn off SIGPIPE: %s\n", strerror(ret));
+ goto err;
+ }
+
+ /*
+	 * We are hardcoding priorities here so that all clients have the
+	 * same priority except for a designated master, which gets a
+	 * higher priority.
+ * XXX If we add a logsonly site to this app, we can give it
+ * priority 0; I say we should document that priority=0 sites should
+ * never become masters; they still need to vote, they just can't
+ * be masters.
+ */
+ if ((ret =
+ machtab_init(&machtab, priority, totalsites)) != 0)
+ goto err;
+
+ /*
+	 * We can now open our environment, although we're not ready to
+	 * begin replicating.  However, we want to have a dbenv around
+	 * so that we can pass it to any of our message handlers.
+ */
+ if ((ret = env_init(progname,
+ home, &dbenv, machtab, DB_THREAD | DB_RECOVER)) != 0)
+ goto err;
+ /*
+	 * Now set up the communication infrastructure.  There are two
+	 * phases.  First, we open our port to listen for incoming
+	 * connections.  Then we attempt to connect to every host we
+	 * know about.
+ */
+
+ ca.dbenv = dbenv;
+ ca.home = home;
+ ca.progname = progname;
+ ca.machtab = machtab;
+ ca.port = selfp->port;
+ if ((ret = pthread_create(&conn_thr, NULL, connect_thread, &ca)) != 0)
+ goto err;
+
+ aa.dbenv = dbenv;
+ aa.progname = progname;
+ aa.home = home;
+ aa.machtab = machtab;
+ aa.sites = sitep;
+ aa.nsites = nsites;
+ if ((ret = pthread_create(&all_thr, NULL, connect_all, &aa)) != 0)
+ goto err;
+
+ /*
+	 * We now have the entire communication infrastructure set up.
+	 * It's time to declare ourselves to be a client or master.
+	 * XXX How do I decide that I have to switch from doing the read
+	 * loop to the write loop or vice versa?
+ */
+ if (whoami == MASTER) {
+ if ((ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER)) != 0) {
+ fprintf(stderr,
+ "dbenv->rep_start failed %s\n", db_strerror(ret));
+ goto err;
+ }
+ if ((ret = domaster(dbenv, progname)) != 0) {
+ fprintf(stderr, "Master failed %s\n", db_strerror(ret));
+ goto err;
+ }
+ } else {
+ memset(&local, 0, sizeof(local));
+ local.data = myaddr;
+ local.size = strlen(myaddr) + 1;
+ if ((ret =
+ dbenv->rep_start(dbenv, &local, DB_REP_CLIENT)) != 0) {
+ fprintf(stderr,
+ "dbenv->rep_start failed %s\n", db_strerror(ret));
+ goto err;
+ }
+ if ((ret = doclient(dbenv, progname, machtab)) != 0) {
+ fprintf(stderr, "Client failed %s\n", db_strerror(ret));
+ goto err;
+ }
+
+ }
+
+ /* Wait on the connection threads. */
+ if (pthread_join(all_thr, &astatus) || pthread_join(conn_thr, &cstatus))
+ ret = errno;
+ if (ret == 0 &&
+ ((int)astatus != EXIT_SUCCESS || (int)cstatus != EXIT_SUCCESS))
+ ret = -1;
+
+err: if (machtab != NULL)
+ free(machtab);
+ if (dbenv != NULL)
+ (void)dbenv->close(dbenv, 0);
+ return (ret);
+}
+
+/*
+ * In this application, we specify all communication via the command line.
+ * In a real application, we would expect that information about the other
+ * sites in the system would be maintained in some sort of configuration
+ * file. The critical part of this interface is that we assume at startup
+ * that we can find out 1) what host/port we wish to listen on for connections,
+ * 2) a (possibly empty) list of other sites we should attempt to connect to,
+ * 3) whether we are a master or a client (if we don't know, we should come up
+ * as a client and see if there is a master out there), and 4) what our
+ * Berkeley DB home environment is.
+ *
+ * These pieces of information are expressed by the following flags.
+ * -m host:port (required; m stands for me)
+ * -o host:port (optional; o stands for other; any number of these may be
+ * specified)
+ * -[MC] M for master/C for client
+ * -h home directory
+ * -n nsites (optional; number of sites in replication group; defaults to 0
+ *	in which case we try to dynamically compute the number of sites in
+ * the replication group.)
+ */
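For illustration only (the host names, ports, and home directories below are invented and are not part of this patch), a three-site group could be started roughly as follows, one process per site:

    ex_repquote -M -h TESTDIR1 -m site1:6000 -o site2:6001 -o site3:6002 -n 3
    ex_repquote -C -h TESTDIR2 -m site2:6001 -o site1:6000 -n 3
    ex_repquote -C -h TESTDIR3 -m site3:6002 -o site1:6000 -n 3

Each client only needs to name one existing site with -o; the DB_REP_NEWSITE handling in ex_rq_util.c propagates connections to the rest of the group.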
+static void
+usage(progname)
+ char *progname;
+{
+ fprintf(stderr, "usage: %s ", progname);
+ fprintf(stderr, "[-CM][-h home][-o host:port][-m host:port]%s",
+ "[-n nsites][-p priority]\n");
+ exit(EXIT_FAILURE);
+}
+
+/* Open and configure an environment. */
+int
+env_init(progname, home, dbenvp, machtab, flags)
+ char *progname, *home;
+ DB_ENV **dbenvp;
+ machtab_t *machtab;
+ u_int32_t flags;
+{
+ DB_ENV *dbenv;
+ int ret;
+ char *prefix;
+
+ if ((prefix = malloc(strlen(progname) + 2)) == NULL) {
+ fprintf(stderr,
+ "%s: System error: %s\n", progname, strerror(errno));
+ return (errno);
+ }
+ sprintf(prefix, "%s:", progname);
+
+ if ((ret = db_env_create(&dbenv, 0)) != 0) {
+ fprintf(stderr, "%s: env create failed: %s\n",
+ progname, db_strerror(ret));
+ return (ret);
+ }
+ dbenv->set_errfile(dbenv, stderr);
+ dbenv->set_errpfx(dbenv, prefix);
+ (void)dbenv->set_cachesize(dbenv, 0, CACHESIZE, 0);
+ /* (void)dbenv->set_flags(dbenv, DB_TXN_NOSYNC, 1); */
+
+ (void)dbenv->set_rep_transport(dbenv,
+ SELF_EID, (void *)machtab, quote_send);
+
+ /*
+	 * When we have log cursors, we can open this environment threaded
+	 * and then avoid creating a new environment for each thread.
+ */
+ flags |= DB_CREATE |
+ DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN;
+
+ ret = dbenv->open(dbenv, home, flags, 0);
+
+ *dbenvp = dbenv;
+ return (ret);
+}
+
diff --git a/db/examples_c/ex_repquote/ex_rq_master.c b/db/examples_c/ex_repquote/ex_rq_master.c
new file mode 100644
index 000000000..0bf732b7b
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_rq_master.c
@@ -0,0 +1,159 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_rq_master.c,v 1.15 2001/10/13 13:13:16 bostic Exp
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static void *master_loop __P((void *));
+
+#define BUFSIZE 1024
+
+int
+domaster(dbenv, progname)
+ DB_ENV *dbenv;
+ char *progname;
+{
+ int ret, t_ret;
+ pthread_t interface_thr;
+ pthread_attr_t attr;
+
+ COMPQUIET(progname, NULL);
+
+ /* Spawn off a thread to handle the basic master interface. */
+	if ((ret = pthread_attr_init(&attr)) != 0 ||
+ (ret = pthread_attr_setdetachstate(&attr,
+ PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ if ((ret = pthread_create(&interface_thr,
+ &attr, master_loop, (void *)dbenv)) != 0)
+ goto err;
+
+err: if ((t_ret = pthread_attr_destroy(&attr)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+static void *
+master_loop(dbenvv)
+ void *dbenvv;
+{
+ DB *dbp;
+ DB_ENV *dbenv;
+ DB_TXN *txn;
+ DBT key, data;
+ char buf[BUFSIZE], *rbuf;
+ int ret;
+
+ dbp = NULL;
+ txn = NULL;
+
+ dbenv = (DB_ENV *)dbenvv;
+ /*
+ * Check if the database exists and if it verifies cleanly.
+ * If it does, run with it; else recreate it and go. Note
+ * that we have to verify outside of the environment.
+ */
+#ifdef NOTDEF
+ if ((ret = db_create(&dbp, NULL, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->verify(dbp, DATABASE, NULL, NULL, 0)) != 0) {
+ if ((ret = dbp->remove(dbp, DATABASE, NULL, 0)) != 0 &&
+ ret != DB_NOTFOUND && ret != ENOENT)
+ return (ret);
+#endif
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return ((void *)ret);
+
+ if ((ret = dbp->open(dbp, DATABASE,
+ NULL, DB_BTREE, DB_CREATE /* | DB_THREAD */, 0)) != 0)
+ goto err;
+
+#ifdef NOTDEF
+ } else {
+ /* Reopen in the environment. */
+ if ((ret = dbp->close(dbp, 0)) != 0)
+ return (ret);
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ return (ret);
+ if ((ret = dbp->open(dbp,
+ DATABASE, NULL, DB_UNKNOWN, DB_THREAD, 0)) != 0)
+ goto err;
+ }
+#endif
+ /*
+ * XXX
+ * It would probably be kind of cool to do this in Tcl and
+ * have a nice GUI. It would also be cool to be independently
+ * wealthy.
+ */
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+ for (;;) {
+ printf("QUOTESERVER> ");
+ fflush(stdout);
+
+ if (fgets(buf, sizeof(buf), stdin) == NULL)
+ break;
+ (void)strtok(&buf[0], " \t\n");
+ rbuf = strtok(NULL, " \t\n");
+ if (rbuf == NULL || rbuf[0] == '\0') {
+ if (strncmp(buf, "exit", 4) == 0 ||
+ strncmp(buf, "quit", 4) == 0)
+ break;
+ fprintf(stderr, "Format: TICKER VALUE\n");
+ continue;
+ }
+
+ key.data = buf;
+ key.size = strlen(buf);
+
+ data.data = rbuf;
+ data.size = strlen(rbuf);
+
+ if ((ret = dbenv->txn_begin(dbenv, NULL, &txn, 0)) != 0)
+ goto err;
+ switch (ret =
+ dbp->put(dbp, txn, &key, &data, 0)) {
+ case 0:
+ break;
+ default:
+ dbp->err(dbp, ret, "DB->put");
+ if (ret != DB_KEYEXIST)
+ goto err;
+ break;
+ }
+ ret = txn->commit(txn, 0);
+ txn = NULL;
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (txn != NULL)
+ (void)txn->abort(txn);
+
+ if (dbp != NULL)
+ (void)dbp->close(dbp, DB_NOSYNC);
+
+ return ((void *)ret);
+}
+
diff --git a/db/examples_c/ex_repquote/ex_rq_net.c b/db/examples_c/ex_repquote/ex_rq_net.c
new file mode 100644
index 000000000..96d5c8939
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_rq_net.c
@@ -0,0 +1,724 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_rq_net.c,v 1.20 2001/10/13 13:13:16 bostic Exp
+ */
+
+#include <sys/types.h>
+
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+
+#include <assert.h>
+#include <errno.h>
+#include <netdb.h>
+#include <pthread.h>
+#include <queue.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+int machtab_add __P((machtab_t *, int, u_int32_t, int, int *));
+
+/*
+ * This file defines the communication infrastructure for the ex_repquote
+ * sample application.
+ *
+ * This application uses TCP/IP for its communication. In an N-site
+ * replication group, this means that there are N * N communication
+ * channels so that every site can communicate with every other site
+ * (this allows elections to be held when the master fails). We do
+ * not require that anyone know about all sites when the application
+ * starts up.  In order to communicate, the application must know about at
+ * least one other site; otherwise it has no way to ever join the group.
+ *
+ * Communication is handled via a number of different threads.  These
+ * thread functions are implemented in ex_rq_util.c.  In this file, we
+ * define the data structures that maintain the state describing the
+ * communication infrastructure, the functions that manipulate this state,
+ * and the routines used to actually send and receive data over the
+ * sockets.
+ */
+
+/*
+ * The communication infrastructure is represented by a machine table,
+ * machtab_t, which is essentially a mutex-protected linked list of members
+ * of the group. The machtab also contains the parameters that are needed
+ * to call for an election. We hardwire values for these parameters in the
+ * init function, but these could be set via some configuration setup in a
+ * real application.  We reserve machine-id 1 to refer to ourselves and
+ * treat machine-id 0 as invalid.
+ */
+
+#define MACHID_INVALID 0
+#define MACHID_SELF 1
+
+struct __machtab {
+ LIST_HEAD(__machlist, __member) machlist;
+ int nextid;
+ pthread_mutex_t mtmutex;
+ u_int32_t check_time;
+ u_int32_t elect_time;
+ int current;
+ int max;
+ int nsites;
+ int priority;
+};
+
+/* Data structure that describes each entry in the machtab. */
+struct __member {
+ u_int32_t hostaddr; /* Host IP address. */
+ int port; /* Port number. */
+ int eid; /* Application-specific machine id. */
+ int fd; /* File descriptor for the socket. */
+ LIST_ENTRY(__member) links;
+ /* For linked list of all members we know of. */
+};
+
+static int quote_send_broadcast __P((DB_ENV *,
+ machtab_t *, const DBT *, DBT *, u_int32_t));
+static int quote_send_one __P((DB_ENV *, const DBT *, DBT *, int, int, u_int32_t));
+
+/*
+ * machtab_init --
+ * Initialize the machine ID table.
+ * XXX Right now I treat the number of sites as the maximum
+ * number we've ever had on the list at one time. We probably
+ * want to make that smarter.
+ */
+int
+machtab_init(machtabp, pri, nsites)
+ machtab_t **machtabp;
+ int pri, nsites;
+{
+ int ret;
+ machtab_t *machtab;
+
+ if ((machtab = malloc(sizeof(machtab_t))) == NULL)
+ return (ENOMEM);
+
+ LIST_INIT(&machtab->machlist);
+
+ /* Reserve eid's 0 and 1. */
+ machtab->nextid = 2;
+/* microseconds in millisecond */
+#define MS 1000
+ machtab->check_time = 500 * MS;
+ machtab->elect_time = 2 * 1000 * MS;
+ machtab->current = machtab->max = 0;
+ machtab->priority = pri;
+ machtab->nsites = nsites;
+
+ ret = pthread_mutex_init(&machtab->mtmutex, NULL);
+
+ *machtabp = machtab;
+
+ return (ret);
+}
+
+/*
+ * machtab_add --
+ * Add a file descriptor to the table of machines, returning
+ * a new machine ID.
+ */
+int
+machtab_add(machtab, fd, hostaddr, port, idp)
+ machtab_t *machtab;
+ int fd;
+ u_int32_t hostaddr;
+ int port, *idp;
+{
+ int ret;
+ member_t *m, *member;
+
+ if ((member = malloc(sizeof(member_t))) == NULL)
+ return (ENOMEM);
+
+ member->fd = fd;
+ member->hostaddr = hostaddr;
+ member->port = port;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (m = LIST_FIRST(&machtab->machlist);
+ m != NULL; m = LIST_NEXT(m, links))
+ if (m->hostaddr == hostaddr && m->port == port)
+ break;
+
+ if (m == NULL) {
+ member->eid = machtab->nextid++;
+ LIST_INSERT_HEAD(&machtab->machlist, member, links);
+ } else
+ member->eid = m->eid;
+
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ if (idp != NULL)
+ *idp = member->eid;
+
+ if (m == NULL) {
+ if (++machtab->current > machtab->max)
+ machtab->max = machtab->current;
+#ifdef APP_DEBUG
+printf("%lx Adding to machtab: %lx:%d at eid %d\n",
+(long)pthread_self(), (long)hostaddr, port, member->eid);
+#endif
+ } else {
+#ifdef APP_DEBUG
+printf("%lx Found in machtab: %lx:%d at eid %d\n",
+(long)pthread_self(), (long)hostaddr, port, member->eid);
+#endif
+ free(member);
+ ret = EEXIST;
+ }
+ return (ret);
+}
+
+/*
+ * machtab_getinfo --
+ * Return host and port information for a particular machine id.
+ */
+int
+machtab_getinfo(machtab, eid, hostp, portp)
+ machtab_t *machtab;
+ int eid;
+ u_int32_t *hostp;
+ int *portp;
+{
+ int ret;
+ member_t *member;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+ *hostp = member->hostaddr;
+ *portp = member->port;
+ break;
+ }
+
+ if ((ret = pthread_mutex_unlock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ return (member != NULL ? 0 : EINVAL);
+}
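A minimal usage sketch for machtab_getinfo() (not code from this patch; it assumes <arpa/inet.h> and <stdio.h> and relies on addresses being stored in host byte order, as they are above):

	u_int32_t host;
	int port;
	struct in_addr addr;

	if (machtab_getinfo(machtab, eid, &host, &port) == 0) {
		addr.s_addr = htonl(host);	/* Table stores host order. */
		printf("eid %d is %s:%d\n", eid, inet_ntoa(addr), port);
	}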
+
+/*
+ * machtab_rem --
+ * Remove a mapping from the table of machines. Lock indicates
+ * whether we need to lock the machtab or not (0 indicates we do not
+ * need to lock; non-zero indicates that we do need to lock).
+ */
+int
+machtab_rem(machtab, eid, lock)
+ machtab_t *machtab;
+ int eid;
+ int lock;
+{
+ int found, ret;
+ member_t *member;
+
+ ret = 0;
+ if (lock && (ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ for (found = 0, member = LIST_FIRST(&machtab->machlist);
+ member != NULL;
+ member = LIST_NEXT(member, links))
+ if (member->eid == eid) {
+#ifdef APP_DEBUG
+printf("%lx Removing eid %d %lx:%d\n", (long)pthread_self, member->eid,
+(long)member->hostaddr, member->port);
+#endif
+ found = 1;
+ LIST_REMOVE(member, links);
+ (void)close(member->fd);
+ free(member);
+ machtab->current--;
+ break;
+ }
+
+ if (LIST_FIRST(&machtab->machlist) == NULL)
+ machtab->nextid = 2;
+
+ if (lock)
+ ret = pthread_mutex_unlock(&machtab->mtmutex);
+
+ return (ret);
+}
+
+void
+machtab_parm(machtab, nump, prip, checkp, electp)
+ machtab_t *machtab;
+ int *nump, *prip;
+ u_int32_t *checkp, *electp;
+{
+ if (machtab->nsites == 0)
+ *nump = machtab->max;
+ else
+ *nump = machtab->nsites;
+ *prip = machtab->priority;
+ *checkp = machtab->check_time;
+ *electp = machtab->elect_time;
+}
+
+/*
+ * listen_socket_init --
+ * Initialize a socket for listening on the specified port. Returns
+ * a file descriptor for the socket, ready for an accept() call
+ * in a thread that we're happy to let block.
+ */
+int
+listen_socket_init(progname, port)
+ char *progname;
+ int port;
+{
+ int s;
+ struct protoent *proto;
+ struct sockaddr_in si;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ return (s);
+
+err:	fprintf(stderr, "%s: %s\n", progname, strerror(errno));
+	close(s);
+ return (-1);
+}
+
+/*
+ * listen_socket_accept --
+ * Accept a connection on a socket. This is essentially just a wrapper
+ * for accept(3).
+ */
+int
+listen_socket_accept(machtab, progname, s, eidp)
+ machtab_t *machtab;
+ char *progname;
+ int s, *eidp;
+{
+ int host, ns, port, ret;
+ size_t si_len;
+ struct sockaddr_in si;
+
+ COMPQUIET(progname, NULL);
+
+wait: memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+ host = ntohl(si.sin_addr.s_addr);
+ port = ntohs(si.sin_port);
+ ret = machtab_add(machtab, ns, host, port, eidp);
+ if (ret == EEXIST) {
+ close(ns);
+ goto wait;
+ } else if (ret != 0)
+ goto err;
+
+ return (ns);
+
+err: close(ns);
+ return (-1);
+}
+
+/*
+ * get_accepted_socket --
+ * Listen on the specified port, and return a file descriptor
+ * when we have accepted a connection on it.
+ */
+int
+get_accepted_socket(progname, port)
+ char *progname;
+ int port;
+{
+ int s, ns;
+ size_t si_len;
+ struct protoent *proto;
+ struct sockaddr_in si;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+
+ memset(&si, 0, sizeof(si));
+ si.sin_family = AF_INET;
+ si.sin_addr.s_addr = htonl(INADDR_ANY);
+ si.sin_port = htons(port);
+
+ if (bind(s, (struct sockaddr *)&si, sizeof(si)) != 0)
+ goto err;
+
+ if (listen(s, 5) != 0)
+ goto err;
+
+ memset(&si, 0, sizeof(si));
+ si_len = sizeof(si);
+ ns = accept(s, (struct sockaddr *)&si, &si_len);
+
+	/* XXX I think we may want to pass up the identity of the
+ * connecting host so we can check for duplicates. For
+ * debugging, let's just display it for now.
+ */
+ return (ns);
+
+err:	fprintf(stderr, "%s: %s\n", progname, strerror(errno));
+	close(s);
+ return (-1);
+}
+
+/*
+ * get_connected_socket --
+ *	Connect to the specified port on the specified remote machine,
+ * and return a file descriptor once the connection has been established.
+ * Add this connection to the machtab.  If we already have a connection
+ * open to this machine, don't create another one; return the eid of the
+ * existing connection (in *eidp), set is_open to 1, and return 0.
+ */
+int
+get_connected_socket(machtab, progname, remotehost, port, is_open, eidp)
+ machtab_t *machtab;
+ char *progname, *remotehost;
+ int port, *is_open, *eidp;
+{
+ int ret, s;
+ struct hostent *hp;
+ struct protoent *proto;
+ struct sockaddr_in si;
+ u_int32_t addr;
+
+ *is_open = 0;
+
+ if ((proto = getprotobyname("tcp")) == NULL)
+ return (-1);
+
+ if ((hp = gethostbyname(remotehost)) == NULL) {
+ fprintf(stderr, "%s: host not found: %s\n", progname,
+ strerror(errno));
+ return (-1);
+ }
+
+ if ((s = socket(AF_INET, SOCK_STREAM, proto->p_proto)) < 0)
+ return (-1);
+ memset(&si, 0, sizeof(si));
+ memcpy((char *)&si.sin_addr, hp->h_addr, hp->h_length);
+ addr = ntohl(si.sin_addr.s_addr);
+ ret = machtab_add(machtab, s, addr, port, eidp);
+ if (ret == EEXIST) {
+ *is_open = 1;
+ close(s);
+ return (0);
+ } else if (ret != 0) {
+ close (s);
+ return (-1);
+ }
+
+ si.sin_family = AF_INET;
+ si.sin_port = htons(port);
+ if (connect(s, (struct sockaddr *)&si, sizeof(si)) < 0) {
+		fprintf(stderr, "%s: connection failed: %s\n",
+ progname, strerror(errno));
+ (void)machtab_rem(machtab, *eidp, 1);
+ return (-1);
+ }
+
+ return (s);
+}
+
+/*
+ * get_next_message --
+ * Read a single message from the specified file descriptor, and
+ * return it in the format used by rep functions (two DBTs and a type).
+ *
+ * This function will become the guts of f_receive, but is also used
+ * directly by code that plans to do the equivalent outside a callback,
+ * and manually dispatch to DB_ENV->rep_process_message().
+ */
+int
+get_next_message(fd, rec, control)
+ int fd;
+ DBT *rec, *control;
+{
+ size_t nr, nleft;
+ u_int32_t rsize, csize;
+ u_int8_t *recbuf, *controlbuf;
+
+ /*
+ * The protocol we use on the wire is dead simple:
+ *
+ * 4 bytes - rec->size
+ * (# read above) - rec->data
+ * 4 bytes - control->size
+ * (# read above) - control->data
+ */
+
+ /* Read rec->size. */
+ nr = read(fd, &rsize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the record itself. */
+ if (rsize > 0) {
+ if (rec->size < rsize)
+ rec->data = realloc(rec->data, rsize);
+ nleft = rsize;
+ recbuf = rec->data;
+ while (nleft > 0) {
+ nr = read(fd, recbuf, nleft);
+ if (nr <= 0)
+ return (1);
+ nleft -= nr;
+ recbuf += nr;
+ }
+ } else {
+ if (rec->data != NULL)
+ free(rec->data);
+ rec->data = NULL;
+ }
+ rec->size = rsize;
+
+ /* Read control->size. */
+ nr = read(fd, &csize, 4);
+ if (nr != 4)
+ return (1);
+
+ /* Read the control struct itself. */
+ if (csize > 0) {
+ controlbuf = control->data;
+ if (control->size < csize)
+ controlbuf = realloc(controlbuf, csize);
+ nr = read(fd, controlbuf, csize);
+ if (nr != csize)
+ return (1);
+ } else {
+ if (control->data != NULL)
+ free(control->data);
+ controlbuf = NULL;
+ }
+ control->data = controlbuf;
+ control->size = csize;
+
+#ifdef APP_DEBUG_MSG
+ {
+ REP_CONTROL *rp;
+
+ rp = (REP_CONTROL *)control->data;
+ fprintf(stderr,
+ "%lx Received message type %d gen %d lsn[%d,%d] flags %lx\n",
+ (long)pthread_self(),
+ rp->rectype, rp->gen, rp->lsn.file, rp->lsn.offset,
+ (unsigned long)rp->flags);
+ if (rp->rectype == REP_LOG)
+ __db_loadme();
+ }
+#endif
+
+ return (0);
+}
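The record payload above is read in a loop because read(2) on a stream socket may return fewer bytes than requested, while the control payload is read with a single call.  A helper in the spirit of that loop might look like the following sketch (not part of this patch; K&R style to match the file):

	/*
	 * readn --
	 *	Read exactly len bytes from fd; return 0 on success,
	 * non-zero on error or EOF.
	 */
	static int
	readn(fd, buf, len)
		int fd;
		u_int8_t *buf;
		size_t len;
	{
		ssize_t nr;

		while (len > 0) {
			if ((nr = read(fd, buf, len)) <= 0)
				return (1);
			buf += nr;
			len -= (size_t)nr;
		}
		return (0);
	}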
+
+/*
+ * quote_send --
+ * The f_send function for DB_ENV->set_rep_transport.
+ */
+int
+quote_send(dbenv, cookie, rec, control, flags, eid)
+ DB_ENV *dbenv;
+ void *cookie;
+ const DBT *rec;
+ DBT *control;
+ u_int32_t flags;
+ int eid;
+{
+ int fd, n, ret, t_ret;
+ machtab_t *machtab;
+ member_t *m;
+
+ machtab = (machtab_t *)cookie;
+
+ if (eid == DB_BROADCAST_EID) {
+ /*
+ * Right now, we do not require successful transmission.
+		 * I'd like to move to requiring at least one successful
+		 * transmission for PERMANENT requests.
+ */
+ n = quote_send_broadcast(dbenv, machtab, rec, control, flags);
+ if (n < 0 /*|| (n == 0 && LF_ISSET(DB_REP_PERMANENT))*/)
+ return (DB_REP_UNAVAIL);
+ return (0);
+ }
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (ret);
+
+ fd = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL;
+ m = LIST_NEXT(m, links)) {
+ if (m->eid == eid) {
+ fd = m->fd;
+ break;
+ }
+ }
+
+	if (fd == 0) {
+		dbenv->err(dbenv, DB_REP_UNAVAIL,
+		    "quote_send: cannot find machine ID %d", eid);
+		(void)pthread_mutex_unlock(&machtab->mtmutex);
+		return (DB_REP_UNAVAIL);
+	}
+
+ ret = quote_send_one(dbenv, rec, control, eid, fd, flags);
+
+ if ((t_ret = (pthread_mutex_unlock(&machtab->mtmutex))) != 0 &&
+ ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * quote_send_broadcast --
+ * Send a message to everybody.
+ * Returns the number of sites to which this message was successfully
+ * communicated. A -1 indicates a fatal error.
+ */
+static int
+quote_send_broadcast(dbenv, machtab, rec, control, flags)
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+ const DBT *rec;
+ DBT *control;
+ u_int32_t flags;
+{
+ int ret, sent;
+ member_t *m, *next;
+
+ if ((ret = pthread_mutex_lock(&machtab->mtmutex)) != 0)
+ return (0);
+
+ sent = 0;
+ for (m = LIST_FIRST(&machtab->machlist); m != NULL; m = next) {
+ next = LIST_NEXT(m, links);
+ if ((ret =
+ quote_send_one(dbenv, rec, control, m->eid, m->fd, flags)) != 0) {
+ (void)machtab_rem(machtab, m->eid, 0);
+ } else
+ sent++;
+ }
+
+ if (pthread_mutex_unlock(&machtab->mtmutex) != 0)
+ return (-1);
+
+ return (sent);
+}
+
+/*
+ * quote_send_one --
+ * Send a message to a single machine, given that machine's file
+ * descriptor.
+ *
+ * !!!
+ * Note that the machtab mutex should be held through this call.
+ * It doubles as a synchronizer to make sure that two threads don't
+ * intersperse writes that are part of two single messages.
+ */
+static int
+quote_send_one(dbenv, rec, control, eid, fd, flags)
+ DB_ENV *dbenv;
+ const DBT *rec;
+ DBT *control;
+ int eid, fd;
+ u_int32_t flags;
+
+{
+ int retry;
+ ssize_t bytes_left, nw;
+ u_int8_t *wp;
+
+ COMPQUIET(flags, 0);
+
+ /*
+ * The protocol is simply: write rec->size, write rec->data,
+ * write control->size, write control->data.
+ */
+ nw = write(fd, &rec->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+
+ if (rec->size > 0) {
+ nw = write(fd, rec->data, rec->size);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ if (nw != (ssize_t)rec->size) {
+ /* Try a couple of times to finish the write. */
+ wp = (u_int8_t *)rec->data + nw;
+ bytes_left = rec->size - nw;
+ for (retry = 0; bytes_left > 0 && retry < 3; retry++) {
+ nw = write(fd, wp, bytes_left);
+ if (nw < 0)
+ return (DB_REP_UNAVAIL);
+ bytes_left -= nw;
+ wp += nw;
+ }
+ if (bytes_left > 0)
+ return (DB_REP_UNAVAIL);
+ }
+ }
+
+ nw = write(fd, &control->size, 4);
+ if (nw != 4)
+ return (DB_REP_UNAVAIL);
+ if (control->size > 0) {
+ nw = write(fd, control->data, control->size);
+ if (nw != (ssize_t)control->size)
+ return (DB_REP_UNAVAIL);
+ }
+
+#ifdef APP_DEBUG_MSG
+ {
+ REP_CONTROL *rp;
+
+ rp = (REP_CONTROL *)control->data;
+ fprintf(stderr,
+ "%lx Sent to %d message type %d, gen %d lsn [%d,%d] flags %lx\n",
+ (long)pthread_self(), eid,
+ rp->rectype, rp->gen, rp->lsn.file, rp->lsn.offset,
+ (unsigned long)rp->flags);
+ if (rp->rectype == REP_LOG)
+ __db_loadme();
+ }
+#else
+ COMPQUIET(eid, 0);
+ COMPQUIET(dbenv, NULL);
+#endif
+
+ return (0);
+}
diff --git a/db/examples_c/ex_repquote/ex_rq_util.c b/db/examples_c/ex_repquote/ex_rq_util.c
new file mode 100644
index 000000000..9dee69d45
--- /dev/null
+++ b/db/examples_c/ex_repquote/ex_rq_util.c
@@ -0,0 +1,439 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: ex_rq_util.c,v 1.10 2001/10/13 13:13:16 bostic Exp
+ */
+
+#include <sys/types.h>
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <db.h>
+
+#include "ex_repquote.h"
+
+static int connect_site __P((DB_ENV *, machtab_t *, char *,
+ site_t *, int *, int *));
+void * elect_thread __P((void *));
+
+typedef struct {
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+} elect_args;
+
+typedef struct {
+ DB_ENV *dbenv;
+ char *progname;
+ char *home;
+ int fd;
+ u_int32_t eid;
+ machtab_t *tab;
+} hm_loop_args;
+
+/*
+ * This is a generic message handling loop that is used both by the
+ * master to accept messages from a client and by clients
+ * to communicate with other clients.
+ */
+void *
+hm_loop(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ DBT rec, control;
+ char *c, *home, *progname;
+ int fd, is_me, eid, n, newm;
+ int open, pri, r, ret, t_ret, tmpid;
+ elect_args *ea;
+ hm_loop_args *ha;
+ machtab_t *tab;
+ pthread_t elect_thr;
+ site_t self;
+ u_int32_t check, elect;
+ void *status;
+
+ ea = NULL;
+
+ ha = (hm_loop_args *)args;
+ dbenv = ha->dbenv;
+ fd = ha->fd;
+ home = ha->home;
+ eid = ha->eid;
+ progname = ha->progname;
+ tab = ha->tab;
+ free(ha);
+
+ memset(&rec, 0, sizeof(DBT));
+ memset(&control, 0, sizeof(DBT));
+
+ for (ret = 0; ret == 0;) {
+ if ((ret = get_next_message(fd, &rec, &control)) != 0) {
+#ifdef APP_DEBUG
+printf("%lx get_next_message for eid = %d failed\n", (long)pthread_self(), eid);
+#endif
+ /*
+			 * Close this connection; if it was the connection
+			 * to the master, call for an election.
+ */
+ close(fd);
+ if ((ret = machtab_rem(tab, eid, 1)) != 0)
+ break;
+
+ /*
+ * If I'm the master, I just lost a client and this
+ * thread is done.
+ */
+ if (master_eid == SELF_EID)
+ break;
+
+ /*
+ * If I was talking with the master and the master
+ * went away, I need to call an election; else I'm
+ * done.
+ */
+ if (master_eid != eid)
+ break;
+
+ master_eid = DB_INVALID_EID;
+ machtab_parm(tab, &n, &pri, &check, &elect);
+ if ((ret = dbenv->rep_elect(dbenv,
+ n, pri, check, elect, &newm, &is_me)) != 0)
+ continue;
+#ifdef APP_DEBUG
+ printf("%lx Election returned new master %d%s\n",
+ (long)pthread_self(),
+ newm, is_me ? "(me)" : "");
+#endif
+ /*
+ * Regardless of the results, the site I was talking
+ * to is gone, so I have nothing to do but exit.
+ */
+ if (is_me && (ret = dbenv->rep_start(dbenv,
+ NULL, DB_REP_MASTER)) == 0)
+ ret = domaster(dbenv, progname);
+ break;
+ }
+
+ tmpid = eid;
+ switch(r = dbenv->rep_process_message(dbenv,
+ &rec, &control, &tmpid)) {
+ case DB_REP_NEWSITE:
+ /*
+			 * Check if we were sent connect information and, if
+			 * we were, whether it refers to us or to a site to
+			 * which we already have a connection.  If not,
+			 * establish a new connection.
+ */
+#ifdef APP_DEBUG
+printf("Received NEWSITE return for %s\n", rec.size == 0 ? "" : (char *)rec.data);
+#endif
+ /* No connect info. */
+ if (rec.size == 0)
+ break;
+
+ /* It's me, do nothing. */
+ if (strncmp(myaddr, rec.data, rec.size) == 0) {
+#ifdef APP_DEBUG
+printf("New site was me\n");
+#endif
+ break;
+ }
+
+ self.host = (char *)rec.data;
+ self.host = strtok(self.host, ":");
+ if ((c = strtok(NULL, ":")) == NULL) {
+ fprintf(stderr, "Bad host specification.\n");
+ goto out;
+ }
+ self.port = atoi(c);
+
+ /*
+ * We try to connect to the new site. If we can't,
+ * we treat it as an error since we know that the site
+ * should be up if we got a message from it (even
+ * indirectly).
+ */
+ ret = connect_site(dbenv,
+ tab, progname, &self, &open, &eid);
+#ifdef APP_DEBUG
+printf("Forked thread for new site: %d\n", eid);
+#endif
+ if (ret != 0)
+ goto out;
+ break;
+ case DB_REP_HOLDELECTION:
+ if (master_eid == SELF_EID)
+ break;
+			/* Make sure that the previous election has finished. */
+ if (ea != NULL) {
+ (void)pthread_join(elect_thr, &status);
+ ea = NULL;
+ }
+ if ((ea = calloc(sizeof(elect_args), 1)) == NULL) {
+ ret = errno;
+ goto out;
+ }
+ ea->dbenv = dbenv;
+ ea->machtab = tab;
+#ifdef APP_DEBUG
+printf("%lx Forking off election thread\n", (long)pthread_self());
+#endif
+ ret = pthread_create(&elect_thr,
+ NULL, elect_thread, (void *)ea);
+ break;
+ case DB_REP_NEWMASTER:
+ /* Check if it's us. */
+#ifdef APP_DEBUG
+printf("%lx Got new master message %d\n", (long)pthread_self(), tmpid);
+#endif
+ master_eid = tmpid;
+ if (tmpid == SELF_EID) {
+ if ((ret = dbenv->rep_start(dbenv,
+ NULL, DB_REP_MASTER)) != 0)
+ goto out;
+ ret = domaster(dbenv, progname);
+ }
+ break;
+ case 0:
+ break;
+ default:
+			fprintf(stderr, "%s: %s\n", progname, db_strerror(r));
+ break;
+ }
+ }
+#ifdef APP_DEBUG
+printf("%lx Breaking out of loop in hm_loop: %s\n",
+(long)pthread_self(), db_strerror(ret));
+#endif
+
+out: if ((t_ret = machtab_rem(tab, eid, 1)) != 0 && ret == 0)
+ ret = t_ret;
+
+ /* Don't close the environment before any children exit. */
+ if (ea != NULL)
+ (void)pthread_join(elect_thr, &status);
+
+ return ((void *)ret);
+}
+
+/*
+ * This is a generic thread that spawns a thread to listen for connections
+ * on a socket and then spawns off child threads to handle each new
+ * connection.
+ */
+void *
+connect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ char *home;
+ char *progname;
+ int fd, i, eid, ns, port, ret;
+ hm_loop_args *ha;
+ connect_args *cargs;
+ machtab_t *machtab;
+#define MAX_THREADS 25
+ pthread_t hm_thrs[MAX_THREADS];
+ pthread_attr_t attr;
+
+ ha = NULL;
+ cargs = (connect_args *)args;
+ dbenv = cargs->dbenv;
+ home = cargs->home;
+ progname = cargs->progname;
+ machtab = cargs->machtab;
+ port = cargs->port;
+
+ if ((ret = pthread_attr_init(&attr)) != 0)
+ return ((void *)EXIT_FAILURE);
+
+ if ((ret =
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) != 0)
+ goto err;
+
+ /*
+ * Loop forever, accepting connections from new machines,
+ * and forking off a thread to handle each.
+ */
+ if ((fd = listen_socket_init(progname, port)) < 0) {
+ ret = errno;
+ goto err;
+ }
+
+ for (i = 0; i < MAX_THREADS; i++) {
+ if ((ns = listen_socket_accept(machtab,
+ progname, fd, &eid)) < 0) {
+ ret = errno;
+ goto err;
+ }
+		if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL) {
+			ret = ENOMEM;
+			goto err;
+		}
+ ha->progname = progname;
+ ha->home = home;
+ ha->fd = ns;
+ ha->eid = eid;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+		if ((ret = pthread_create(&hm_thrs[i], &attr,
+ hm_loop, (void *)ha)) != 0)
+ goto err;
+ ha = NULL;
+ }
+
+ /* If we fell out, we ended up with too many threads. */
+ fprintf(stderr, "Too many threads!\n");
+ ret = ENOMEM;
+
+err: pthread_attr_destroy(&attr);
+ return (ret == 0 ? (void *)EXIT_SUCCESS : (void *)EXIT_FAILURE);
+}
+
+/*
+ * Open a connection to everyone that we've been told about. If we
+ * cannot open some connections, keep trying.
+ */
+void *
+connect_all(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ all_args *aa;
+ char *home, *progname;
+ hm_loop_args *ha;
+ int failed, i, eid, nsites, open, ret, *success;
+ machtab_t *machtab;
+ site_t *sites;
+
+ ha = NULL;
+ aa = (all_args *)args;
+ dbenv = aa->dbenv;
+ progname = aa->progname;
+ home = aa->home;
+ machtab = aa->machtab;
+ nsites = aa->nsites;
+ sites = aa->sites;
+
+ ret = 0;
+ if ((success = calloc(nsites, sizeof(int))) == NULL) {
+ fprintf(stderr, "%s: %s\n", progname, strerror(errno));
+ ret = 1;
+ goto err;
+ }
+
+ for (failed = nsites; failed > 0;) {
+ for (i = 0; i < nsites; i++) {
+ if (success[i])
+ continue;
+
+ ret = connect_site(dbenv, machtab,
+ progname, &sites[i], &open, &eid);
+
+ /*
+ * If we couldn't make the connection, this isn't
+ * fatal to the loop, but we have nothing further
+ * to do on this machine at the moment.
+ */
+ if (ret == DB_REP_UNAVAIL)
+ continue;
+
+ if (ret != 0)
+ goto err;
+
+ failed--;
+ success[i] = 1;
+
+ /* If the connection is already open, we're done. */
+ if (ret == 0 && open == 1)
+ continue;
+
+ }
+ sleep(1);
+ }
+
+err: free(success);
+ return (ret ? (void *)EXIT_FAILURE : (void *)EXIT_SUCCESS);
+}
+
+int
+connect_site(dbenv, machtab, progname, site, is_open, eidp)
+ DB_ENV *dbenv;
+ machtab_t *machtab;
+ char *progname;
+ site_t *site;
+ int *is_open;
+ int *eidp;
+{
+ int ret, s;
+ hm_loop_args *ha;
+ pthread_t hm_thr;
+
+ if ((s = get_connected_socket(machtab, progname,
+ site->host, site->port, is_open, eidp)) < 0)
+ return (DB_REP_UNAVAIL);
+
+ if (*is_open)
+ return (0);
+
+ if ((ha = calloc(sizeof(hm_loop_args), 1)) == NULL) {
+ ret = errno;
+ goto err;
+ }
+
+ ha->progname = progname;
+ ha->fd = s;
+ ha->eid = *eidp;
+ ha->tab = machtab;
+ ha->dbenv = dbenv;
+
+ if ((ret = pthread_create(&hm_thr, NULL,
+ hm_loop, (void *)ha)) != 0) {
+ fprintf(stderr,"%s: System error %s\n",
+ progname, strerror(ret));
+ goto err1;
+ }
+
+ return (0);
+
+err1: free(ha);
+err:
+ return (ret);
+}
+
+/*
+ * We need to spawn off a new thread in which to hold an election in
+ * case we are the only thread listening for messages.
+ */
+void *
+elect_thread(args)
+ void *args;
+{
+ DB_ENV *dbenv;
+ elect_args *eargs;
+ int is_me, n, ret, pri;
+ machtab_t *machtab;
+ u_int32_t check, elect;
+
+ eargs = (elect_args *)args;
+ dbenv = eargs->dbenv;
+ machtab = eargs->machtab;
+ free(eargs);
+
+ machtab_parm(machtab, &n, &pri, &check, &elect);
+ while ((ret = dbenv->rep_elect(dbenv,
+ n, pri, check, elect, &master_eid, &is_me)) != 0)
+ sleep(2);
+
+ /* Check if it's us. */
+ if (is_me)
+ ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
+
+ return ((void *)(ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE));
+}
diff --git a/db/include/rep.h b/db/include/rep.h
new file mode 100644
index 000000000..0cf2469b4
--- /dev/null
+++ b/db/include/rep.h
@@ -0,0 +1,144 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#ifndef _REP_H_
+#define _REP_H_
+
+#define REP_ALIVE 1 /* I am alive message. */
+#define REP_ALIVE_REQ 2 /* Request for alive messages. */
+#define REP_ALL_REQ 3 /* Request all log records greater than LSN. */
+#define REP_ELECT 4 /* Indicates that all listeners should */
+ /* begin master election */
+#define REP_FILE 6 /* Page of a database file. */
+#define REP_FILE_REQ 7 /* Request for a database file. */
+#define REP_LOG 8 /* Log record. */
+#define REP_LOG_REQ 9 /* Request for a log record. */
+#define REP_MASTER_REQ 10 /* Who is the master */
+#define REP_NEWCLIENT 11 /* Announces the presence of a new client. */
+#define REP_NEWFILE 12 /* Announce a log file change. */
+#define REP_NEWMASTER 13 /* Announces who the master is. */
+#define REP_NEWSITE 14 /* Announces that a site has heard from a new
+ * site; like NEWCLIENT, but indirect. A
+ * NEWCLIENT message comes directly from the new
+ * client while a NEWSITE comes indirectly from
+				 * someone who heard about a NEWCLIENT.
+ */
+#define REP_PAGE 15 /* Database page. */
+#define REP_PAGE_REQ 16 /* Request for a database page. */
+#define REP_PLIST 17 /* Database page list. */
+#define REP_PLIST_REQ 18 /* Request for a page list. */
+#define REP_VERIFY 19 /* A log record for verification. */
+#define REP_VERIFY_FAIL 20 /* The client is outdated. */
+#define REP_VERIFY_REQ 21 /* Request for a log record to verify. */
+#define REP_VOTE1 22 /* Send out your information for an election. */
+#define REP_VOTE2 23 /* Send a "you are master" vote. */
+
+/* Used to consistently designate which messages ought to be received where. */
+#define MASTER_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_MASTER)) return (EINVAL)
+
+#define CLIENT_ONLY(dbenv) \
+ if (!F_ISSET(dbenv, DB_ENV_REP_CLIENT)) return (EINVAL)
+
+#define ANYSITE(dbenv)
+
+/* Shared replication structure. */
+
+typedef struct __rep {
+ DB_MUTEX mutex; /* Region lock. */
+ u_int32_t tally_off; /* Offset of the tally region. */
+ int eid; /* Environment id. */
+ int master_id; /* ID of the master site. */
+ u_int32_t gen; /* Replication generation number */
+ int asites; /* Space allocated for sites. */
+ int nsites; /* Number of sites in group. */
+ int priority; /* My priority in an election. */
+
+ /* Vote tallying information. */
+ int sites; /* Sites heard from. */
+ int winner; /* Current winner. */
+ int w_priority; /* Winner priority. */
+ u_int32_t w_gen; /* Winner generation. */
+ DB_LSN w_lsn; /* Winner LSN. */
+ int votes; /* Number of votes for this site. */
+
+#define REP_F_EPHASE1 0x01 /* In phase 1 of election. */
+#define REP_F_EPHASE2 0x02 /* In phase 2 of election. */
+#define REP_F_LOGSONLY 0x04 /* Log-site only; cannot be upgraded. */
+#define REP_F_MASTER 0x08 /* Master replica. */
+#define REP_F_RECOVER 0x10
+#define REP_F_UPGRADE 0x20 /* Upgradeable replica. */
+#define REP_ISCLIENT (REP_F_UPGRADE | REP_F_LOGSONLY)
+ u_int32_t flags;
+} REP;
+
+#define IN_ELECTION(R) F_ISSET((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+#define ELECTION_DONE(R) F_CLR((R), REP_F_EPHASE1 | REP_F_EPHASE2)
+
+/*
+ * Per-process replication structure.
+ */
+struct __db_rep {
+ DB_MUTEX *mutexp;
+ DB *rep_db; /* Bookkeeping database. */
+ REP *region; /* In memory structure. */
+ int (*rep_send) /* Send function. */
+ __P((DB_ENV *, void *,
+ const DBT *, DBT *, u_int32_t, int));
+ void *rep_send_data; /* User data passed to every send. */
+};
+
+/* Control structure for replication communication infrastructure. */
+typedef struct __rep_control {
+ DB_LSN lsn; /* Log sequence number. */
+ u_int32_t rectype; /* Message type. */
+ u_int32_t gen; /* Generation number. */
+ u_int32_t flags; /* log_put flag value. */
+} REP_CONTROL;
+
+/* Election vote information. */
+typedef struct __rep_vote {
+ int priority; /* My site's priority. */
+ int nsites; /* Number of sites I've been in
+ * communication with. */
+} REP_VOTE_INFO;
+
+/*
+ * This structure takes care of representing a transaction.
+ * It holds all the records, sorted by page number so that
+ * we can obtain locks and apply updates in a deadlock free
+ * order.
+ */
+typedef struct __lsn_page {
+ DB_LSN lsn;
+ u_int32_t fid;
+ DB_LOCK_ILOCK pgdesc;
+#define LSN_PAGE_NOLOCK 0x0001 /* No lock necessary for log rec. */
+ u_int32_t flags;
+} LSN_PAGE;
+
+typedef struct __txn_recs {
+ int npages;
+ int nalloc;
+ LSN_PAGE *array;
+ u_int32_t txnid;
+ u_int32_t lockid;
+} TXN_RECS;
+
+/*
+ * This is used by the page-prep routines to do the lock_vec call to
+ * apply the updates for a single transaction or a collection of
+ * transactions.
+ */
+typedef struct _linfo {
+ int n;
+ DB_LOCKREQ *reqs;
+ DBT *objs;
+} linfo_t;
+
+#include "rep_ext.h"
+#endif /* _REP_H_ */
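The MASTER_ONLY and CLIENT_ONLY macros above are meant to guard internal message handlers, returning EINVAL when a message arrives at the wrong kind of site.  A hypothetical handler (the function name and arguments are invented for illustration, not taken from this patch) would use them like this:

	static int
	__rep_example_req(dbenv, rec)
		DB_ENV *dbenv;
		DBT *rec;
	{
		MASTER_ONLY(dbenv);	/* Only the master services this. */

		/* ... service the request using rec ... */
		return (0);
	}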
diff --git a/db/include_auto/rep_ext.h b/db/include_auto/rep_ext.h
new file mode 100644
index 000000000..f030c9c23
--- /dev/null
+++ b/db/include_auto/rep_ext.h
@@ -0,0 +1,26 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rep_ext_h_
+#define _rep_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+int __rep_dbenv_create __P((DB_ENV *));
+int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+int __rep_client_dbinit __P((DB_ENV *, int));
+int __rep_send_message __P((DB_ENV *, int, u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+int __rep_region_init __P((DB_ENV *));
+int __rep_region_destroy __P((DB_ENV *));
+int __rep_dbenv_close __P((DB_ENV *));
+int __rep_preclose __P((DB_ENV *));
+int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+int __rep_lockpgno_init __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+int __rep_lockpages __P((DB_ENV *, int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+int __rep_is_client __P((DB_ENV *));
+int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int));
+int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _rep_ext_h_ */
diff --git a/db/include_auto/rep_ext.in b/db/include_auto/rep_ext.in
new file mode 100644
index 000000000..e6d994d07
--- /dev/null
+++ b/db/include_auto/rep_ext.in
@@ -0,0 +1,42 @@
+/* DO NOT EDIT: automatically built by dist/s_include. */
+#ifndef _rep_ext_h_
+#define _rep_ext_h_
+#if defined(__cplusplus)
+extern "C" {
+#endif
+#define __rep_dbenv_create __rep_dbenv_create@DB_VERSION_UNIQUE_NAME@
+int __rep_dbenv_create __P((DB_ENV *));
+#define __rep_process_message __rep_process_message@DB_VERSION_UNIQUE_NAME@
+int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+#define __rep_client_dbinit __rep_client_dbinit@DB_VERSION_UNIQUE_NAME@
+int __rep_client_dbinit __P((DB_ENV *, int));
+#define __rep_send_message __rep_send_message@DB_VERSION_UNIQUE_NAME@
+int __rep_send_message __P((DB_ENV *, int, u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+#define __rep_region_init __rep_region_init@DB_VERSION_UNIQUE_NAME@
+int __rep_region_init __P((DB_ENV *));
+#define __rep_region_destroy __rep_region_destroy@DB_VERSION_UNIQUE_NAME@
+int __rep_region_destroy __P((DB_ENV *));
+#define __rep_dbenv_close __rep_dbenv_close@DB_VERSION_UNIQUE_NAME@
+int __rep_dbenv_close __P((DB_ENV *));
+#define __rep_preclose __rep_preclose@DB_VERSION_UNIQUE_NAME@
+int __rep_preclose __P((DB_ENV *));
+#define __rep_check_alloc __rep_check_alloc@DB_VERSION_UNIQUE_NAME@
+int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+#define __rep_new_master __rep_new_master@DB_VERSION_UNIQUE_NAME@
+int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+#define __rep_lockpgno_init __rep_lockpgno_init@DB_VERSION_UNIQUE_NAME@
+int __rep_lockpgno_init __P((DB_ENV *, int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), size_t *));
+#define __rep_unlockpages __rep_unlockpages@DB_VERSION_UNIQUE_NAME@
+int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+#define __rep_lockpages __rep_lockpages@DB_VERSION_UNIQUE_NAME@
+int __rep_lockpages __P((DB_ENV *, int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *), DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+#define __rep_is_client __rep_is_client@DB_VERSION_UNIQUE_NAME@
+int __rep_is_client __P((DB_ENV *));
+#define __rep_send_vote __rep_send_vote@DB_VERSION_UNIQUE_NAME@
+int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int));
+#define __rep_grow_sites __rep_grow_sites@DB_VERSION_UNIQUE_NAME@
+int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+#if defined(__cplusplus)
+}
+#endif
+#endif /* _rep_ext_h_ */
diff --git a/db/java/src/com/sleepycat/db/DbLockNotGrantedException.java b/db/java/src/com/sleepycat/db/DbLockNotGrantedException.java
new file mode 100644
index 000000000..6eab84302
--- /dev/null
+++ b/db/java/src/com/sleepycat/db/DbLockNotGrantedException.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: DbLockNotGrantedException.java,v 11.2 2001/10/05 02:36:06 bostic Exp
+ */
+
+package com.sleepycat.db;
+
+public class DbLockNotGrantedException extends DbException {
+ public DbLockNotGrantedException(String message,
+ int op, int mode, Dbt obj,
+ DbLock lock, int index)
+ {
+ super(message, Db.DB_LOCK_NOTGRANTED);
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ this.index = index;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public int get_index()
+ {
+ return index;
+ }
+
+ private int op;
+ private int mode;
+ private Dbt obj;
+ private DbLock lock;
+ private int index;
+
+}
+
diff --git a/db/java/src/com/sleepycat/db/DbLockRequest.java b/db/java/src/com/sleepycat/db/DbLockRequest.java
new file mode 100644
index 000000000..4cd5c6290
--- /dev/null
+++ b/db/java/src/com/sleepycat/db/DbLockRequest.java
@@ -0,0 +1,66 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: DbLockRequest.java,v 11.2 2001/10/05 02:36:06 bostic Exp
+ */
+
+package com.sleepycat.db;
+
+public class DbLockRequest
+{
+ public DbLockRequest(int op, int mode, Dbt obj, DbLock lock)
+ {
+ this.op = op;
+ this.mode = mode;
+ this.obj = obj;
+ this.lock = lock;
+ }
+
+ public int get_op()
+ {
+ return op;
+ }
+
+ public void set_op(int op)
+ {
+ this.op = op;
+ }
+
+ public int get_mode()
+ {
+ return mode;
+ }
+
+ public void set_mode(int mode)
+ {
+ this.mode = mode;
+ }
+
+ public Dbt get_obj()
+ {
+ return obj;
+ }
+
+ public void set_obj(Dbt obj)
+ {
+ this.obj = obj;
+ }
+
+ public DbLock get_lock()
+ {
+ return lock;
+ }
+
+ public void set_lock(DbLock lock)
+ {
+ this.lock = lock;
+ }
+
+ private int op;
+ private int mode;
+ private Dbt obj;
+ private DbLock lock;
+}
diff --git a/db/java/src/com/sleepycat/db/DbLogc.java b/db/java/src/com/sleepycat/db/DbLogc.java
new file mode 100644
index 000000000..d3395f883
--- /dev/null
+++ b/db/java/src/com/sleepycat/db/DbLogc.java
@@ -0,0 +1,39 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: DbLogc.java,v 11.2 2001/10/02 13:36:27 dda Exp
+ */
+
+package com.sleepycat.db;
+
+/**
+ *
+ * @author Donald D. Anderson
+ */
+public class DbLogc
+{
+ // methods
+ //
+ public native void close(int flags)
+ throws DbException;
+
+ // returns: 0, DB_NOTFOUND, or throws error
+ public native int get(DbLsn lsn, Dbt data, int flags)
+ throws DbException;
+
+ protected native void finalize()
+ throws Throwable;
+
+ // private data
+ //
+ private long private_dbobj_ = 0;
+
+ static {
+ Db.load_db();
+ }
+}
+
+// end of DbLogc.java
diff --git a/db/java/src/com/sleepycat/db/DbRepTransport.java b/db/java/src/com/sleepycat/db/DbRepTransport.java
new file mode 100644
index 000000000..80b91d6ca
--- /dev/null
+++ b/db/java/src/com/sleepycat/db/DbRepTransport.java
@@ -0,0 +1,19 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: DbRepTransport.java,v 11.1 2001/10/04 04:59:15 krinsky Exp
+ */
+
+package com.sleepycat.db;
+
+/*
+ * This is used as a callback by DbEnv.set_rep_transport.
+ */
+public interface DbRepTransport
+{
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException;
+}
diff --git a/db/libdb_java/com_sleepycat_db_DbLogc.h b/db/libdb_java/com_sleepycat_db_DbLogc.h
new file mode 100644
index 000000000..a6fbb670d
--- /dev/null
+++ b/db/libdb_java/com_sleepycat_db_DbLogc.h
@@ -0,0 +1,37 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+/* Header for class com_sleepycat_db_DbLogc */
+
+#ifndef _Included_com_sleepycat_db_DbLogc
+#define _Included_com_sleepycat_db_DbLogc
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: close
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: get
+ * Signature: (Lcom/sleepycat/db/DbLsn;Lcom/sleepycat/db/Dbt;I)I
+ */
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *, jobject, jobject, jobject, jint);
+
+/*
+ * Class: com_sleepycat_db_DbLogc
+ * Method: finalize
+ * Signature: ()V
+ */
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *, jobject);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/db/libdb_java/java_DbLogc.c b/db/libdb_java/java_DbLogc.c
new file mode 100644
index 000000000..63b3e0eb3
--- /dev/null
+++ b/db/libdb_java/java_DbLogc.c
@@ -0,0 +1,107 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: java_DbLogc.c,v 11.2 2001/10/02 01:33:40 bostic Exp ";
+#endif /* not lint */
+
+#include <jni.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#endif
+
+#include "db_int.h"
+#include "java_util.h"
+#include "com_sleepycat_db_DbLogc.h"
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_close
+ (JNIEnv *jnienv, jobject jthis, jint flags)
+{
+ int err;
+ DB_LOGC *dblogc = get_DB_LOGC(jnienv, jthis);
+
+ if (!verify_non_null(jnienv, dblogc))
+ return;
+ err = dblogc->close(dblogc, flags);
+ if (verify_return(jnienv, err, 0)) {
+ set_private_dbobj(jnienv, name_DB_LOGC, jthis, 0);
+ }
+}
+
+JNIEXPORT jint JNICALL Java_com_sleepycat_db_DbLogc_get
+ (JNIEnv *jnienv, jobject jthis,
+ /*DbLsn*/ jobject lsn, /*Dbt*/ jobject data, jint flags)
+{
+ int err, retry;
+ DB_LOGC *dblogc;
+ DB_LSN *dblsn;
+ LOCKED_DBT ldata;
+ OpKind dataop;
+
+ /* Depending on flags, the user may be supplying the key,
+ * or else we may have to retrieve it.
+ */
+ err = 0;
+ dataop = outOp;
+
+ dblogc = get_DB_LOGC(jnienv, jthis);
+ dblsn = get_DB_LSN(jnienv, lsn);
+ if (locked_dbt_get(&ldata, jnienv, data, dataop) != 0)
+ goto out1;
+
+ if (!verify_non_null(jnienv, dblogc))
+ goto out1;
+
+ for (retry = 0; retry < 3; retry++) {
+ err = dblogc->get(dblogc, dblsn, &ldata.javainfo->dbt, flags);
+
+ /* If we failed due to lack of memory in our DBT arrays,
+ * retry.
+ */
+ if (err != ENOMEM)
+ break;
+ if (!locked_dbt_realloc(&ldata, jnienv))
+ break;
+ }
+ out1:
+ locked_dbt_put(&ldata, jnienv);
+ if (err != 0 && err != DB_NOTFOUND) {
+ if (verify_dbt(jnienv, err, &ldata))
+ verify_return(jnienv, err, 0);
+ }
+ return (err);
+}
+
+JNIEXPORT void JNICALL Java_com_sleepycat_db_DbLogc_finalize
+ (JNIEnv *jnienv, jobject jthis)
+{
+ /* Free any data related to DB_LOGC here.
+ * If we ever have java-only data embedded in the DB_LOGC
+ * and need to do this, we'll have to track DbLogc's
+ * according to which DbEnv owns them, just as
+ * we track Db's according to which DbEnv owns them.
+ * That's necessary to avoid double freeing that
+ * comes about when closes interact with GC.
+ */
+
+#ifdef DIAGNOSTIC
+ DB_LOGC *dblogc;
+
+ dblogc = get_DB_LOGC(jnienv, jthis);
+ if (dblogc != NULL)
+ fprintf(stderr, "Java API: DbLogc has not been closed\n");
+#else
+
+ COMPQUIET(jnienv, NULL);
+ COMPQUIET(jthis, NULL);
+
+#endif
+}
diff --git a/db/os/os_clock.c b/db/os/os_clock.c
new file mode 100644
index 000000000..b6218a845
--- /dev/null
+++ b/db/os/os_clock.c
@@ -0,0 +1,93 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: os_clock.c,v 1.6 2001/09/07 18:17:49 krinsky Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#if TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#if HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif /* HAVE_SYS_TIME_H */
+#endif /* TIME_WITH_SYS_TIME */
+
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ *
+ * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+#if defined(HAVE_GETTIMEOFDAY)
+ struct timeval tp;
+ int ret;
+
+retry: if (gettimeofday(&tp, NULL) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "gettimeofday: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_usec;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && defined(HAVE_CLOCK_GETTIME)
+ struct timespec tp;
+ int ret;
+
+retry: if (clock_gettime(CLOCK_REALTIME, &tp) != 0) {
+ if ((ret = __os_get_errno()) == EINTR)
+ goto retry;
+ __db_err(dbenv, "clock_gettime: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = tp.tv_sec;
+ if (usecsp != NULL)
+ *usecsp = tp.tv_nsec / 1000;
+#endif
+#if !defined(HAVE_GETTIMEOFDAY) && !defined(HAVE_CLOCK_GETTIME)
+ time_t now;
+ int ret;
+
+ if (time(&now) == (time_t)-1) {
+ ret = __os_get_errno();
+ __db_err(dbenv, "time: %s", strerror(ret));
+ return (ret);
+ }
+
+ if (secsp != NULL)
+ *secsp = now;
+ if (usecsp != NULL)
+ *usecsp = 0;
+#endif
+ return (0);
+}
diff --git a/db/os_vxworks/os_vx_abs.c b/db/os_vxworks/os_vx_abs.c
new file mode 100644
index 000000000..dcf4190dd
--- /dev/null
+++ b/db/os_vxworks/os_vx_abs.c
@@ -0,0 +1,45 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: os_vx_abs.c,v 1.6 2001/05/23 14:47:23 sue Exp ";
+#endif /* not lint */
+
+#include "db_int.h"
+#include "iosLib.h"
+
+/*
+ * __os_abspath --
+ * Return if a path is an absolute path.
+ */
+int
+__os_abspath(path)
+ const char *path;
+{
+ DEV_HDR *dummy;
+ char *ptail;
+
+ /*
+ * VxWorks devices can be rooted at any name at all.
+ * Use iosDevFind() to see if name matches any of our devices.
+ */
+ if ((dummy = iosDevFind((char *)path, &ptail)) == NULL)
+ return (0);
+ /*
+ * If the routine used a device, then ptail points to the
+ * rest and we are an abs path.
+ */
+ if (ptail != path)
+ return (1);
+ /*
+ * If the path starts with a '/', then we are an absolute path,
+ * using the host machine, otherwise we are not.
+ */
+ return (path[0] == '/');
+}
diff --git a/db/os_vxworks/os_vx_finit.c b/db/os_vxworks/os_vx_finit.c
new file mode 100644
index 000000000..44ab6913d
--- /dev/null
+++ b/db/os_vxworks/os_vx_finit.c
@@ -0,0 +1,31 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1999-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: os_vx_finit.c,v 1.3 2001/06/01 18:35:55 bostic Exp ";
+#endif /* not lint */
+
+#include "db_int.h"
+
+/*
+ * __os_fs_notzero --
+ * Return 1 if allocated filesystem blocks are not zeroed.
+ *
+ * PUBLIC: int __os_fs_notzero __P((void));
+ */
+int
+__os_fs_notzero()
+{
+ /*
+ * Some VxWorks FS drivers do not zero-fill pages that were never
+ * explicitly written to the file, they give you random garbage,
+ * and that breaks Berkeley DB.
+ */
+ return (1);
+}
diff --git a/db/os_vxworks/os_vx_map.c b/db/os_vxworks/os_vx_map.c
new file mode 100644
index 000000000..6e971e15c
--- /dev/null
+++ b/db/os_vxworks/os_vx_map.c
@@ -0,0 +1,442 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1998-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * This code is derived from software contributed to Sleepycat Software by
+ * Frederick G.M. Roeber of Netscape Communications Corp.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: os_vx_map.c,v 1.17 2001/07/31 19:30:36 sue Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "common_ext.h"
+
+/*
+ * DB uses memory-mapped files for two things:
+ * faster access of read-only databases, and
+ * shared memory for process synchronization and locking.
+ * The code carefully does not mix the two uses. The first-case uses are
+ * actually written such that memory-mapping isn't really required -- it's
+ * merely a convenience -- so we don't have to worry much about it. In the
+ * second case, it's solely used as a shared memory mechanism, so that's
+ * all we have to replace.
+ *
+ * All memory in VxWorks is shared, and a task can allocate memory and keep
+ * notes. So I merely have to allocate memory, remember the "filename" for
+ * that memory, and issue small-integer segment IDs which index the list of
+ * these shared-memory segments. Subsequent opens are checked against the
+ * list of already open segments.
+ */
+typedef struct {
+ void *segment; /* Segment address. */
+ u_int32_t size; /* Segment size. */
+ char *name; /* Segment name. */
+ long segid; /* Segment ID. */
+} os_segdata_t;
+
+static os_segdata_t *__os_segdata; /* Segment table. */
+static int __os_segdata_size; /* Segment table size. */
+
+#define OS_SEGDATA_STARTING_SIZE 16
+#define OS_SEGDATA_INCREMENT 16
+
+static int __os_segdata_allocate
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_find_byname
+ __P((DB_ENV *, const char *, REGINFO *, REGION *));
+static int __os_segdata_init __P((DB_ENV *));
+static int __os_segdata_new __P((DB_ENV *, int *));
+static int __os_segdata_release __P((DB_ENV *, REGION *, int));
+
+/*
+ * __os_r_sysattach --
+ * Create/join a shared memory region.
+ *
+ * PUBLIC: int __os_r_sysattach __P((DB_ENV *, REGINFO *, REGION *));
+ */
+int
+__os_r_sysattach(dbenv, infop, rp)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ REGION *rp;
+{
+ int ret;
+
+ if (__os_segdata == NULL)
+ __os_segdata_init(dbenv);
+
+ DB_BEGIN_SINGLE_THREAD;
+
+ /* Try to find an already existing segment. */
+ ret = __os_segdata_find_byname(dbenv, infop->name, infop, rp);
+
+ /*
+ * If we are trying to join a region, it is easy, either we
+ * found it and we return, or we didn't find it and we return
+ * an error that it doesn't exist.
+ */
+ if (!F_ISSET(infop, REGION_CREATE)) {
+ if (ret != 0) {
+ __db_err(dbenv, "segment %s does not exist",
+ infop->name);
+ ret = EAGAIN;
+ }
+ goto out;
+ }
+
+ /*
+ * If we get here, we are trying to create the region.
+ * There are several things to consider:
+ * - if we have an error (not a found or not-found value), return.
+ * - they better have shm_key set.
+ * - if the region is already there (ret == 0 from above),
+ * assume the application crashed and we're restarting.
+ * Delete the old region.
+ * - try to create the region.
+ */
+ if (ret != 0 && ret != ENOENT)
+ goto out;
+
+ if (dbenv->shm_key == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "no base shared memory ID specified");
+ ret = EAGAIN;
+ goto out;
+ }
+ if (ret == 0 && __os_segdata_release(dbenv, rp, 1) != 0) {
+ __db_err(dbenv,
+ "key: %ld: shared memory region already exists",
+ dbenv->shm_key + (infop->id - 1));
+ ret = EAGAIN;
+ goto out;
+ }
+
+ ret = __os_segdata_allocate(dbenv, infop->name, infop, rp);
+out:
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
+
+/*
+ * __os_r_sysdetach --
+ * Detach from a shared region.
+ *
+ * PUBLIC: int __os_r_sysdetach __P((DB_ENV *, REGINFO *, int));
+ */
+int
+__os_r_sysdetach(dbenv, infop, destroy)
+ DB_ENV *dbenv;
+ REGINFO *infop;
+ int destroy;
+{
+ /*
+ * If just detaching, there is no mapping to discard.
+ * If destroying, remove the region.
+ */
+ if (destroy)
+ return (__os_segdata_release(dbenv, infop->rp, 0));
+ return (0);
+}
+
+/*
+ * __os_mapfile --
+ * Map in a shared memory file.
+ *
+ * PUBLIC: int __os_mapfile __P((DB_ENV *,
+ * PUBLIC: char *, DB_FH *, size_t, int, void **));
+ */
+int
+__os_mapfile(dbenv, path, fhp, len, is_rdonly, addrp)
+ DB_ENV *dbenv;
+ char *path;
+ DB_FH *fhp;
+ int is_rdonly;
+ size_t len;
+ void **addrp;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(path, NULL);
+ COMPQUIET(fhp, NULL);
+ COMPQUIET(is_rdonly, 0);
+ COMPQUIET(len, 0);
+ COMPQUIET(addrp, NULL);
+ return (EINVAL);
+}
+
+/*
+ * __os_unmapfile --
+ * Unmap the shared memory file.
+ *
+ * PUBLIC: int __os_unmapfile __P((DB_ENV *, void *, size_t));
+ */
+int
+__os_unmapfile(dbenv, addr, len)
+ DB_ENV *dbenv;
+ void *addr;
+ size_t len;
+{
+ /* We cannot map in regular files in VxWorks. */
+ COMPQUIET(dbenv, NULL);
+ COMPQUIET(addr, NULL);
+ COMPQUIET(len, 0);
+ return (EINVAL);
+}
+
+/*
+ * __os_segdata_init --
+ * Initializes the library's table of shared memory segments.
+ * Called once on the first time through __os_segdata_new().
+ */
+static int
+__os_segdata_init(dbenv)
+ DB_ENV *dbenv;
+{
+ int ret;
+
+ if (__os_segdata != NULL) {
+ __db_err(dbenv, "shared memory segment already exists");
+ return (EEXIST);
+ }
+
+ /*
+ * The lock init call returns a locked lock.
+ */
+ DB_BEGIN_SINGLE_THREAD;
+ __os_segdata_size = OS_SEGDATA_STARTING_SIZE;
+ ret = __os_calloc(dbenv,
+ __os_segdata_size, sizeof(os_segdata_t), &__os_segdata);
+ DB_END_SINGLE_THREAD;
+ return (ret);
+}
+
+/*
+ * __os_segdata_destroy --
+ * Destroys the library's table of shared memory segments. It also
+ * frees all linked data: the segments themselves, and their names.
+ * Currently not called. This function should be called if the
+ * user creates a function to unload or shutdown.
+ *
+ * PUBLIC: int __os_segdata_destroy __P((DB_ENV *));
+ */
+int
+__os_segdata_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ os_segdata_t *p;
+ int i;
+
+ if (__os_segdata == NULL)
+ return (0);
+
+ DB_BEGIN_SINGLE_THREAD;
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL) {
+ __os_freestr(dbenv, p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(dbenv, p->segment, p->size);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ }
+
+ __os_free(dbenv, __os_segdata, __os_segdata_size * sizeof(os_segdata_t));
+ __os_segdata = NULL;
+ __os_segdata_size = 0;
+ DB_END_SINGLE_THREAD;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_allocate --
+ * Creates a new segment of the specified size, optionally with the
+ * specified name.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_allocate(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ int id, ret;
+
+ if ((ret = __os_segdata_new(dbenv, &id)) != 0)
+ return (ret);
+
+ p = &__os_segdata[id];
+ if ((ret = __os_calloc(dbenv, 1, rp->size, &p->segment)) != 0)
+ return (ret);
+ if ((ret = __os_strdup(dbenv, name, &p->name)) != 0) {
+ __os_free(dbenv, p->segment, rp->size);
+ p->segment = NULL;
+ return (ret);
+ }
+ p->size = rp->size;
+ p->segid = dbenv->shm_key + infop->id - 1;
+
+ infop->addr = p->segment;
+ rp->segid = id;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_new --
+ *	Finds a new segdata slot.  Does not initialise it, so the id returned
+ * is only valid until you call this again.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ */
+static int
+__os_segdata_new(dbenv, segidp)
+ DB_ENV *dbenv;
+ int *segidp;
+{
+ os_segdata_t *p;
+ int i, newsize, ret;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->segment == NULL) {
+ *segidp = i;
+ return (0);
+ }
+ }
+
+ /*
+ * No more free slots, expand.
+ */
+ newsize = __os_segdata_size + OS_SEGDATA_INCREMENT;
+ if ((ret = __os_realloc(dbenv, newsize * sizeof(os_segdata_t),
+ &__os_segdata)) != 0)
+ return (ret);
+ memset(&__os_segdata[__os_segdata_size],
+ 0, OS_SEGDATA_INCREMENT * sizeof(os_segdata_t));
+
+ *segidp = __os_segdata_size;
+ __os_segdata_size = newsize;
+
+ return (0);
+}
+
+/*
+ * __os_segdata_find_byname --
+ * Finds a segment by its name and shm_key.
+ *
+ * Assumes it is called with the SEGDATA lock taken.
+ *
+ * PUBLIC: __os_segdata_find_byname
+ * PUBLIC: __P((DB_ENV *, const char *, REGINFO *, REGION *));
+ */
+static int
+__os_segdata_find_byname(dbenv, name, infop, rp)
+ DB_ENV *dbenv;
+ const char *name;
+ REGINFO *infop;
+ REGION *rp;
+{
+ os_segdata_t *p;
+ long segid;
+ int i;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (name == NULL) {
+ __db_err(dbenv, "no segment name given");
+ return (EAGAIN);
+ }
+
+ /*
+ * If we are creating the region, compute the segid.
+ * If we are joining the region, we use the segid in the
+ * index we are given.
+ */
+ if (F_ISSET(infop, REGION_CREATE))
+ segid = dbenv->shm_key + (infop->id - 1);
+ else {
+ if (rp->segid >= __os_segdata_size ||
+ rp->segid == INVALID_REGION_SEGID) {
+ __db_err(dbenv, "Invalid segment id given");
+ return (EAGAIN);
+ }
+ segid = __os_segdata[rp->segid].segid;
+ }
+ for (i = 0; i < __os_segdata_size; i++) {
+ p = &__os_segdata[i];
+ if (p->name != NULL && strcmp(name, p->name) == 0 &&
+ p->segid == segid) {
+ infop->addr = p->segment;
+ rp->segid = i;
+ return (0);
+ }
+ }
+ return (ENOENT);
+}
+
+/*
+ * __os_segdata_release --
+ * Free a segdata entry.
+ */
+static int
+__os_segdata_release(dbenv, rp, is_locked)
+ DB_ENV *dbenv;
+ REGION *rp;
+ int is_locked;
+{
+ os_segdata_t *p;
+
+ if (__os_segdata == NULL) {
+ __db_err(dbenv, "shared memory segment not initialized");
+ return (EAGAIN);
+ }
+
+ if (rp->segid < 0 || rp->segid >= __os_segdata_size) {
+ __db_err(dbenv, "segment id %ld out of range", rp->segid);
+ return (EINVAL);
+ }
+
+ if (is_locked == 0)
+ DB_BEGIN_SINGLE_THREAD;
+ p = &__os_segdata[rp->segid];
+ if (p->name != NULL) {
+ __os_freestr(dbenv, p->name);
+ p->name = NULL;
+ }
+ if (p->segment != NULL) {
+ __os_free(dbenv, p->segment, p->size);
+ p->segment = NULL;
+ }
+ p->size = 0;
+ if (is_locked == 0)
+ DB_END_SINGLE_THREAD;
+
+ /* Any shrink-table logic could go here */
+
+ return (0);
+}
diff --git a/db/os_win32/os_clock.c b/db/os_win32/os_clock.c
new file mode 100644
index 000000000..7e73f5ba2
--- /dev/null
+++ b/db/os_win32/os_clock.c
@@ -0,0 +1,40 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: os_clock.c,v 1.2 2001/08/18 17:39:08 dda Exp ";
+#endif /* not lint */
+
+#include <sys/types.h>
+#include <sys/timeb.h>
+#include <string.h>
+
+#include "db_int.h"
+#include "os_jump.h"
+
+/*
+ * __os_clock --
+ * Return the current time-of-day clock in seconds and microseconds.
+ *
+ * PUBLIC: int __os_clock __P((DB_ENV *, u_int32_t *, u_int32_t *));
+ */
+int
+__os_clock(dbenv, secsp, usecsp)
+ DB_ENV *dbenv;
+ u_int32_t *secsp, *usecsp; /* Seconds and microseconds. */
+{
+ struct _timeb now;
+
+ _ftime(&now);
+ if (secsp != NULL)
+ *secsp = now.time;
+ if (usecsp != NULL)
+ *usecsp = now.millitm * 1000;
+ return (0);
+}
diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pm b/db/perl/BerkeleyDB/BerkeleyDB.pm
new file mode 100644
index 000000000..03374e4d6
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB.pm
@@ -0,0 +1,1231 @@
+
+package BerkeleyDB;
+
+
+# Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+
+# The documentation for this module is at the bottom of this file,
+# after the line __END__.
+
+BEGIN { require 5.004_04 }
+
+use strict;
+use Carp;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD);
+
+$VERSION = '0.16';
+
+require Exporter;
+require DynaLoader;
+require AutoLoader;
+use IO ;
+
+@ISA = qw(Exporter DynaLoader);
+# Items to export into callers namespace by default. Note: do not export
+# names by default without a very good reason. Use EXPORT_OK instead.
+# Do not simply export all your public functions/methods/constants.
+@EXPORT = qw(
+
+ DB_AFTER
+ DB_APPEND
+ DB_ARCH_ABS
+ DB_ARCH_DATA
+ DB_ARCH_LOG
+ DB_BEFORE
+ DB_BTREE
+ DB_BTREEMAGIC
+ DB_BTREEOLDVER
+ DB_BTREEVERSION
+ DB_CHECKPOINT
+ DB_CONSUME
+ DB_CONSUME_WAIT
+ DB_CREATE
+ DB_CURLSN
+ DB_CURRENT
+ DB_DBT_MALLOC
+ DB_DBT_PARTIAL
+ DB_DBT_USERMEM
+ DB_DELETED
+ DB_DELIMITER
+ DB_DUP
+ DB_DUPSORT
+ DB_ENV_APPINIT
+ DB_ENV_STANDALONE
+ DB_ENV_THREAD
+ DB_EXCL
+ DB_FILE_ID_LEN
+ DB_FIRST
+ DB_FIXEDLEN
+ DB_FLUSH
+ DB_FORCE
+ DB_GET_BOTH
+ DB_GET_RECNO
+ DB_HASH
+ DB_HASHMAGIC
+ DB_HASHOLDVER
+ DB_HASHVERSION
+ DB_INCOMPLETE
+ DB_INIT_CDB
+ DB_INIT_LOCK
+ DB_INIT_LOG
+ DB_INIT_MPOOL
+ DB_INIT_TXN
+ DB_JOIN_ITEM
+ DB_JOINENV
+ DB_KEYEMPTY
+ DB_KEYEXIST
+ DB_KEYFIRST
+ DB_KEYLAST
+ DB_LAST
+ DB_LOCKMAGIC
+ DB_LOCKVERSION
+ DB_LOCK_CONFLICT
+ DB_LOCK_DEADLOCK
+ DB_LOCK_DEFAULT
+ DB_LOCK_GET
+ DB_LOCK_NORUN
+ DB_LOCK_NOTGRANTED
+ DB_LOCK_NOTHELD
+ DB_LOCK_NOWAIT
+ DB_LOCK_OLDEST
+ DB_LOCK_RANDOM
+ DB_LOCK_RIW_N
+ DB_LOCK_RW_N
+ DB_LOCK_YOUNGEST
+ DB_LOGMAGIC
+ DB_LOGOLDVER
+ DB_MAX_PAGES
+ DB_MAX_RECORDS
+ DB_MPOOL_CLEAN
+ DB_MPOOL_CREATE
+ DB_MPOOL_DIRTY
+ DB_MPOOL_DISCARD
+ DB_MPOOL_LAST
+ DB_MPOOL_NEW
+ DB_MPOOL_PRIVATE
+ DB_MUTEXDEBUG
+ DB_MUTEXLOCKS
+ DB_NEEDSPLIT
+ DB_NEXT
+ DB_NEXT_DUP
+ DB_NOMMAP
+ DB_NOOVERWRITE
+ DB_NOSYNC
+ DB_NOTFOUND
+ DB_PAD
+ DB_PAGEYIELD
+ DB_POSITION
+ DB_PREV
+ DB_PRIVATE
+ DB_QUEUE
+ DB_RDONLY
+ DB_RECNO
+ DB_RECNUM
+ DB_RECORDCOUNT
+ DB_RECOVER
+ DB_RECOVER_FATAL
+ DB_REGISTERED
+ DB_RENUMBER
+ DB_RMW
+ DB_RUNRECOVERY
+ DB_SEQUENTIAL
+ DB_SET
+ DB_SET_RANGE
+ DB_SET_RECNO
+ DB_SNAPSHOT
+ DB_SWAPBYTES
+ DB_TEMPORARY
+ DB_THREAD
+ DB_TRUNCATE
+ DB_TXNMAGIC
+ DB_TXNVERSION
+ DB_TXN_BACKWARD_ROLL
+ DB_TXN_CKP
+ DB_TXN_FORWARD_ROLL
+ DB_TXN_LOCK_2PL
+ DB_TXN_LOCK_MASK
+ DB_TXN_LOCK_OPTIMIST
+ DB_TXN_LOCK_OPTIMISTIC
+ DB_TXN_LOG_MASK
+ DB_TXN_LOG_REDO
+ DB_TXN_LOG_UNDO
+ DB_TXN_LOG_UNDOREDO
+ DB_TXN_NOSYNC
+ DB_TXN_NOWAIT
+ DB_TXN_OPENFILES
+ DB_TXN_REDO
+ DB_TXN_SYNC
+ DB_TXN_UNDO
+ DB_USE_ENVIRON
+ DB_USE_ENVIRON_ROOT
+ DB_VERSION_MAJOR
+ DB_VERSION_MINOR
+ DB_VERSION_PATCH
+ DB_WRITECURSOR
+ );
+
+sub AUTOLOAD {
+ # This AUTOLOAD is used to 'autoload' constants from the constant()
+ # XS function. If a constant is not found then control is passed
+ # to the AUTOLOAD in AutoLoader.
+
+ my $constname;
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my $val = constant($constname, @_ ? $_[0] : 0);
+ if ($! != 0) {
+ if ($! =~ /Invalid/) {
+ $AutoLoader::AUTOLOAD = $AUTOLOAD;
+ goto &AutoLoader::AUTOLOAD;
+ }
+ else {
+ croak "Your vendor has not defined BerkeleyDB macro $constname";
+ }
+ }
+ eval "sub $AUTOLOAD { $val }";
+ goto &$AUTOLOAD;
+}
+
+bootstrap BerkeleyDB $VERSION;
+
+# Preloaded methods go here.
+
+
+sub ParseParameters($@)
+{
+ my ($default, @rest) = @_ ;
+ my (%got) = %$default ;
+ my (@Bad) ;
+ my ($key, $value) ;
+ my $sub = (caller(1))[3] ;
+ my %options = () ;
+ local ($Carp::CarpLevel) = 1 ;
+
+ # allow the options to be passed as a hash reference or
+ # as the complete hash.
+ if (@rest == 1) {
+
+ croak "$sub: parameter is not a reference to a hash"
+ if ref $rest[0] ne "HASH" ;
+
+ %options = %{ $rest[0] } ;
+ }
+ elsif (@rest >= 2) {
+ %options = @rest ;
+ }
+
+ while (($key, $value) = each %options)
+ {
+ $key =~ s/^-// ;
+
+ if (exists $default->{$key})
+ { $got{$key} = $value }
+ else
+ { push (@Bad, $key) }
+ }
+
+ if (@Bad) {
+ my ($bad) = join(", ", @Bad) ;
+        croak "unknown key value(s) $bad" ;
+ }
+
+ return \%got ;
+}
+
+use UNIVERSAL qw( isa ) ;
+
+sub env_remove
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Config => { name => value, name => value }
+ # [ -Flags => DB_INIT_LOCK| ]
+ # ;
+
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Flags => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ if (!isaFilehandle($got->{ErrFile})) {
+ my $handle = new IO::File ">$got->{ErrFile}"
+ or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+ $got->{ErrFile} = $handle ;
+ }
+ }
+
+
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+	    if ref $got->{Config} ne 'HASH' ;
+
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %{$got->{Config}}) {
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ return _env_remove($got) ;
+}
+
+sub db_remove
+{
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ Filename => undef,
+ Subname => undef,
+ Flags => 0,
+ Env => undef,
+ }, @_) ;
+
+ croak("Must specify a filename")
+ if ! defined $got->{Filename} ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ return _db_remove($got);
+}
+
+package BerkeleyDB::Env ;
+
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+use vars qw( %valid_config_keys ) ;
+
+sub isaFilehandle
+{
+ my $fh = shift ;
+
+ return ((isa($fh,'GLOB') or isa(\$fh,'GLOB')) and defined fileno($fh) )
+
+}
+
+%valid_config_keys = map { $_, 1 } qw( DB_DATA_DIR DB_LOG_DIR DB_TEMP_DIR
+DB_TMP_DIR ) ;
+
+sub new
+{
+ # Usage:
+ #
+ # $env = new BerkeleyDB::Env
+ # [ -Home => $path, ]
+ # [ -Mode => mode, ]
+ # [ -Config => { name => value, name => value }
+ # [ -ErrFile => filename or filehandle, ]
+ # [ -ErrPrefix => "string", ]
+ # [ -Flags => DB_INIT_LOCK| ]
+ # [ -Cachesize => number ]
+ # [ -LockDetect => ]
+ # [ -Verbose => boolean ]
+ # ;
+
+ my $pkg = shift ;
+ my $got = BerkeleyDB::ParseParameters({
+ Home => undef,
+ Server => undef,
+ Mode => 0666,
+ ErrFile => undef,
+ ErrPrefix => undef,
+ Flags => 0,
+ Cachesize => 0,
+ LockDetect => 0,
+ Verbose => 0,
+ Config => undef,
+ }, @_) ;
+
+ if (defined $got->{ErrFile}) {
+ if (!isaFilehandle($got->{ErrFile})) {
+ my $handle = new IO::File ">$got->{ErrFile}"
+ or croak "Cannot open file $got->{ErrFile}: $!\n" ;
+ $got->{ErrFile} = $handle ;
+ }
+ }
+
+
+ my %config ;
+ if (defined $got->{Config}) {
+ croak("Config parameter must be a hash reference")
+	    if ref $got->{Config} ne 'HASH' ;
+
+ %config = %{ $got->{Config} } ;
+ @BerkeleyDB::a = () ;
+ my $k = "" ; my $v = "" ;
+ while (($k, $v) = each %config) {
+ if ($BerkeleyDB::db_version >= 3.1 && ! $valid_config_keys{$k} ) {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error ;
+ }
+ push @BerkeleyDB::a, "$k\t$v" ;
+ }
+
+ $got->{"Config"} = pack("p*", @BerkeleyDB::a, undef)
+ if @BerkeleyDB::a ;
+ }
+
+ my ($addr) = _db_appinit($pkg, $got) ;
+ my $obj ;
+ $obj = bless [$addr] , $pkg if $addr ;
+ if ($obj && $BerkeleyDB::db_version >= 3.1 && keys %config) {
+ my ($k, $v);
+ while (($k, $v) = each %config) {
+ if ($k eq 'DB_DATA_DIR')
+ { $obj->set_data_dir($v) }
+ elsif ($k eq 'DB_LOG_DIR')
+ { $obj->set_lg_dir($v) }
+ elsif ($k eq 'DB_TEMP_DIR' || $k eq 'DB_TMP_DIR')
+ { $obj->set_tmp_dir($v) }
+ else {
+ $BerkeleyDB::Error = "illegal name-value pair: $k $v\n" ;
+ croak $BerkeleyDB::Error
+ }
+ }
+ }
+ return $obj ;
+}
+
+
+sub TxnMgr
+{
+ my $env = shift ;
+ my ($addr) = $env->_TxnMgr() ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::TxnMgr" if $addr ;
+ return $obj ;
+}
+
+sub txn_begin
+{
+ my $env = shift ;
+ my ($addr) = $env->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $env] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Hash ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Hash specific
+ Ffactor => 0,
+ Nelem => 0,
+ Hash => undef,
+ DupCompare => undef,
+
+ # BerkeleyDB specific
+ ReadKey => undef,
+ WriteKey => undef,
+ ReadValue => undef,
+ WriteValue => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_hash($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*TIEHASH = \&new ;
+
+
+package BerkeleyDB::Btree ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedHash ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Btree specific
+ Minkey => 0,
+ Compare => undef,
+ DupCompare => undef,
+ Prefix => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr) = _db_open_btree($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Btree::TIEHASH = \&BerkeleyDB::Btree::new ;
+
+
+package BerkeleyDB::Recno ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Recno specific
+ Delim => undef,
+ Len => undef,
+ Pad => undef,
+ Source => undef,
+ ArrayBase => 1, # lowest index in array
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_recno($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Recno::TIEARRAY = \&BerkeleyDB::Recno::new ;
+*BerkeleyDB::Recno::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Queue ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ # Queue specific
+ Len => undef,
+ Pad => undef,
+ ArrayBase => 1, # lowest index in array
+ ExtentSize => undef,
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("Tie needs a reference to an array")
+ if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+
+ croak("ArrayBase can only be 0 or 1, parsed $got->{ArrayBase}")
+ if $got->{ArrayBase} != 1 and $got->{ArrayBase} != 0 ;
+
+ $got->{Fname} = $got->{Filename} if defined $got->{Filename} ;
+
+ my ($addr) = _db_open_queue($self, $got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr] , $self ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+*BerkeleyDB::Queue::TIEARRAY = \&BerkeleyDB::Queue::new ;
+
+## package BerkeleyDB::Text ;
+##
+## use vars qw(@ISA) ;
+## @ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+## use UNIVERSAL qw( isa ) ;
+## use Carp ;
+##
+## sub new
+## {
+## my $self = shift ;
+## my $got = BerkeleyDB::ParseParameters(
+## {
+## # Generic Stuff
+## Filename => undef,
+## #Flags => BerkeleyDB::DB_CREATE(),
+## Flags => 0,
+## Property => 0,
+## Mode => 0666,
+## Cachesize => 0,
+## Lorder => 0,
+## Pagesize => 0,
+## Env => undef,
+## #Tie => undef,
+## Txn => undef,
+##
+## # Recno specific
+## Delim => undef,
+## Len => undef,
+## Pad => undef,
+## Btree => undef,
+## }, @_) ;
+##
+## croak("Env not of type BerkeleyDB::Env")
+## if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+##
+## croak("Txn not of type BerkeleyDB::Txn")
+## if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+##
+## croak("-Tie needs a reference to an array")
+## if defined $got->{Tie} and $got->{Tie} !~ /ARRAY/ ;
+##
+## # rearange for recno
+## $got->{Source} = $got->{Filename} if defined $got->{Filename} ;
+## delete $got->{Filename} ;
+## $got->{Fname} = $got->{Btree} if defined $got->{Btree} ;
+## return BerkeleyDB::Recno::_db_open_recno($self, $got);
+## }
+##
+## *BerkeleyDB::Text::TIEARRAY = \&BerkeleyDB::Text::new ;
+## *BerkeleyDB::Text::db_stat = \&BerkeleyDB::Btree::db_stat ;
+
+package BerkeleyDB::Unknown ;
+
+use vars qw(@ISA) ;
+@ISA = qw( BerkeleyDB::Common BerkeleyDB::_tiedArray ) ;
+use UNIVERSAL qw( isa ) ;
+use Carp ;
+
+sub new
+{
+ my $self = shift ;
+ my $got = BerkeleyDB::ParseParameters(
+ {
+ # Generic Stuff
+ Filename => undef,
+ Subname => undef,
+ #Flags => BerkeleyDB::DB_CREATE(),
+ Flags => 0,
+ Property => 0,
+ Mode => 0666,
+ Cachesize => 0,
+ Lorder => 0,
+ Pagesize => 0,
+ Env => undef,
+ #Tie => undef,
+ Txn => undef,
+
+ }, @_) ;
+
+ croak("Env not of type BerkeleyDB::Env")
+ if defined $got->{Env} and ! isa($got->{Env},'BerkeleyDB::Env');
+
+ croak("Txn not of type BerkeleyDB::Txn")
+ if defined $got->{Txn} and ! isa($got->{Txn},'BerkeleyDB::Txn');
+
+ croak("-Tie needs a reference to a hash")
+ if defined $got->{Tie} and $got->{Tie} !~ /HASH/ ;
+
+ my ($addr, $type) = _db_open_unknown($got);
+ my $obj ;
+ if ($addr) {
+ $obj = bless [$addr], "BerkeleyDB::$type" ;
+ push @{ $obj }, $got->{Env} if $got->{Env} ;
+ $obj->Txn($got->{Txn}) if $got->{Txn} ;
+ }
+ return $obj ;
+}
+
+
+package BerkeleyDB::_tiedHash ;
+
+use Carp ;
+
+#sub TIEHASH
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiehash REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \%hash ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+ croak("usage \$x->Tie \\%hash\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to a hash")
+ if defined $ref and $ref !~ /HASH/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie %{ $ref }, "BerkeleyDB::_tiedHash", $self ;
+ return undef ;
+}
+
+
+sub TIEHASH
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) == 0 ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+ $self->db_del($key) ;
+}
+
+sub CLEAR
+{
+ my $self = shift ;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ while ($cursor->c_get($key, $value, BerkeleyDB::DB_PREV()) == 0)
+ { $cursor->c_del() }
+ #1 while $cursor->c_del() == 0 ;
+ # cursor will self-destruct
+}
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieHash::DESTROY\n" ;
+# $self->{Cursor}->c_close() if $self->{Cursor} ;
+#}
+
+package BerkeleyDB::_tiedArray ;
+
+use Carp ;
+
+sub Tie
+{
+ # Usage:
+ #
+ # $db->Tie \@array ;
+ #
+
+ my $self = shift ;
+
+ #print "Tie method REF=[$self] [" . (ref $self) . "]\n" ;
+
+    croak("usage \$x->Tie \\\@array\n") unless @_ ;
+ my $ref = shift ;
+
+ croak("Tie needs a reference to an array")
+ if defined $ref and $ref !~ /ARRAY/ ;
+
+ #tie %{ $ref }, ref($self), $self ;
+ tie @{ $ref }, "BerkeleyDB::_tiedArray", $self ;
+ return undef ;
+}
+
+
+#sub TIEARRAY
+#{
+# my $self = shift ;
+# my $db_object = shift ;
+#
+#print "Tiearray REF=[$self] [" . (ref $self) . "]\n" ;
+#
+# return bless { Obj => $db_object}, $self ;
+#}
+
+sub TIEARRAY
+{
+ my $self = shift ;
+ my $db_object = shift ;
+ #return bless $db_object, 'BerkeleyDB::Common' ;
+ return $db_object ;
+}
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ $self->db_put($key, $value) ;
+}
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = undef ;
+ $self->db_get($key, $value) ;
+
+ return $value ;
+}
+
+*CLEAR = \&BerkeleyDB::_tiedHash::CLEAR ;
+*FIRSTKEY = \&BerkeleyDB::_tiedHash::FIRSTKEY ;
+*NEXTKEY = \&BerkeleyDB::_tiedHash::NEXTKEY ;
+
+sub EXTEND {} # don't do anything with EXTEND
+
+
+sub SHIFT
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+
+sub UNSHIFT
+{
+ my $self = shift;
+ croak "unshift is unsupported with Queue databases"
+ if $self->type == BerkeleyDB::DB_QUEUE() ;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ if ($cursor->c_get($key, $value, BerkeleyDB::DB_FIRST()) == 0)
+ {
+ foreach $value (reverse @_)
+ {
+ $key = 0 ;
+ $cursor->c_put($key, $value, BerkeleyDB::DB_BEFORE()) ;
+ }
+ }
+ }
+}
+
+sub PUSH
+{
+ my $self = shift;
+ if (@_)
+ {
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ if ($cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) == 0)
+ {
+ foreach $value (@_)
+ {
+ ++ $key ;
+ $self->db_put($key, $value) ;
+ }
+ }
+
+# can use this when DB_APPEND is fixed.
+# foreach $value (@_)
+# {
+# my $status = $cursor->c_put($key, $value, BerkeleyDB::DB_AFTER()) ;
+#print "[$status]\n" ;
+# }
+ }
+}
+
+sub POP
+{
+ my $self = shift;
+ my ($key, $value) = (0, 0) ;
+ my $cursor = $self->db_cursor() ;
+ return undef if $cursor->c_get($key, $value, BerkeleyDB::DB_LAST()) != 0 ;
+ return undef if $cursor->c_del() != 0 ;
+
+ return $value ;
+}
+
+sub SPLICE
+{
+ my $self = shift;
+ croak "SPLICE is not implemented yet" ;
+}
+
+*shift = \&SHIFT ;
+*unshift = \&UNSHIFT ;
+*push = \&PUSH ;
+*pop = \&POP ;
+*clear = \&CLEAR ;
+*length = \&FETCHSIZE ;
+
+sub STORESIZE
+{
+ croak "STORESIZE is not implemented yet" ;
+#print "STORESIZE @_\n" ;
+# my $self = shift;
+# my $length = shift ;
+# my $current_length = $self->FETCHSIZE() ;
+#print "length is $current_length\n";
+#
+# if ($length < $current_length) {
+#print "Make smaller $length < $current_length\n" ;
+# my $key ;
+# for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+# { $self->db_del($key) }
+# }
+# elsif ($length > $current_length) {
+#print "Make larger $length > $current_length\n" ;
+# $self->db_put($length-1, "") ;
+# }
+# else { print "stay the same\n" }
+
+}
+
+
+
+#sub DESTROY
+#{
+# my $self = shift ;
+# print "BerkeleyDB::_tieArray::DESTROY\n" ;
+#}
+
+
+package BerkeleyDB::Common ;
+
+
+use Carp ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub Txn
+{
+ my $self = shift ;
+ my $txn = shift ;
+ #print "BerkeleyDB::Common::Txn db [$self] txn [$txn]\n" ;
+ if ($txn) {
+ $self->_Txn($txn) ;
+ push @{ $txn }, $self ;
+ }
+ else {
+ $self->_Txn() ;
+ }
+ #print "end BerkeleyDB::Common::Txn \n";
+}
+
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+ my $cursor = $db->db_cursor() ;
+
+    # iterate through the database until either EOF ($status != 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $cursor->c_get($key, $value, BerkeleyDB::DB_SET()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $cursor->c_get($key, $value, BerkeleyDB::DB_NEXT()) ) {
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+sub db_cursor
+{
+ my $db = shift ;
+ my ($addr) = $db->_db_cursor(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub db_join
+{
+ croak 'Usage: $db->BerkeleyDB::Common::db_join([cursors], flags=0)'
+ if @_ < 2 || @_ > 3 ;
+ my $db = shift ;
+ my ($addr) = $db->_db_join(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $db] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Cursor ;
+
+sub c_close
+{
+ my $cursor = shift ;
+ $cursor->[1] = "" ;
+ return $cursor->_c_close() ;
+}
+
+sub c_dup
+{
+ my $cursor = shift ;
+ my ($addr) = $cursor->_c_dup(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $cursor->[1]] , "BerkeleyDB::Cursor" if $addr ;
+ return $obj ;
+}
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::TxnMgr ;
+
+sub DESTROY
+{
+ my $self = shift ;
+ $self->_DESTROY() ;
+}
+
+sub txn_begin
+{
+ my $txnmgr = shift ;
+ my ($addr) = $txnmgr->_txn_begin(@_) ;
+ my $obj ;
+ $obj = bless [$addr, $txnmgr] , "BerkeleyDB::Txn" if $addr ;
+ return $obj ;
+}
+
+package BerkeleyDB::Txn ;
+
+sub Txn
+{
+ my $self = shift ;
+ my $db ;
+ # keep a reference to each db in the txn object
+ foreach $db (@_) {
+ $db->_Txn($self) ;
+ push @{ $self}, $db ;
+ }
+}
+
+sub txn_commit
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_commit() ;
+ return $status ;
+}
+
+sub txn_abort
+{
+ my $self = shift ;
+ $self->disassociate() ;
+ my $status = $self->_txn_abort() ;
+ return $status ;
+}
+
+sub disassociate
+{
+ my $self = shift ;
+ my $db ;
+ while ( @{ $self } > 2) {
+ $db = pop @{ $self } ;
+ $db->Txn() ;
+ }
+ #print "end disassociate\n" ;
+}
+
+
+sub DESTROY
+{
+ my $self = shift ;
+
+ $self->disassociate() ;
+    # first close the transaction
+ $self->_DESTROY() ;
+}
+
+package BerkeleyDB::Term ;
+
+END
+{
+ close_everything() ;
+}
+
+
+package BerkeleyDB ;
+
+
+
+# Autoload methods go after =cut, and are processed by the autosplit program.
+
+1;
+__END__
+
+
diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pod b/db/perl/BerkeleyDB/BerkeleyDB.pod
new file mode 100644
index 000000000..0509fd5fe
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB.pod
@@ -0,0 +1,1760 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2 and 3. In general it is safe to assume
+that the interface provided here is identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x are not available via this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases you open within the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+        [ -Cachesize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename or filehandle, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+     -Filename => "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+     -Filename => "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects either the name of a file or a reference to a filehandle. Any
+errors generated internally by Berkeley DB will be logged to this file.
+
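+For example (the file name below is invented):
+
+    -ErrFile => "/home/databases/errors.log",
+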
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise
+and a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the ...
+
+B<DB_INIT_TXN>
+
+Initialise the ...
+
+B<DB_MPOOL_PRIVATE>
+
+Initialise the ...
+
+B<DB_INIT_MPOOL> is also specified.
+
+Initialise the ...
+
+B<DB_NOMMAP>
+
+Initialise the ...
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
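+For illustration only (a sketch, not a recipe taken from this document),
+a transactional environment might OR several of these flags together:
+
+    $env = new BerkeleyDB::Env
+        -Home  => "/home/databases",
+        -Flags => DB_CREATE | DB_INIT_LOCK | DB_INIT_LOG |
+                  DB_INIT_MPOOL | DB_INIT_TXN
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+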
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
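+For example (a sketch only, picking one of the values above):
+
+    -LockDetect => DB_LOCK_DEFAULT,
+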
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
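+For example, a one-line sketch (the prefix string here is invented):
+
+    $env->errPrefix("my-application") ;
+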
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is
+(a short sketch follows this list).
+
+=back
+
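+A short sketch of how B<BerkeleyDB::Unknown> might be used (the filename
+below is made up; the constructor blesses the object it returns into the
+class that matches the type found on disk, so C<ref> reveals it):
+
+    use BerkeleyDB ;
+
+    my $db = new BerkeleyDB::Unknown
+        -Filename => "mystery.db"
+        or die "Cannot open mystery.db: $BerkeleyDB::Error\n" ;
+
+    # e.g. BerkeleyDB::Btree or BerkeleyDB::Hash
+    print "database type is ", ref $db, "\n" ;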
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB.
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database come back in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+Do the same as the previous example but not using tie.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+  # Check for existence of a key
+  my ($k, $v) = ("", "") ;
+  print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+  # print the contents of the file
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted. The key this time is colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order they were added to the database. The code below is
+identical to the previous example except that the B<DB_DUPSORT> flag is
+also specified.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
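+
+Pending a fuller example, here is a minimal sketch of one way this could
+look: it reuses the fruit database from the previous section, but supplies
+a case-insensitive B<-DupCompare> routine so that duplicate values are
+sorted without regard to case.
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $filename = "fruit" ;
+    unlink $filename ;
+    my $db = new BerkeleyDB::Hash
+                -Filename   => $filename,
+                -Flags      => DB_CREATE,
+                -Property   => DB_DUP | DB_DUPSORT,
+                -DupCompare => sub { lc $_[0] cmp lc $_[1] }
+        or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+    # "Apple" and "apple" now sort next to each other under the same key
+    $db->db_put("red", "Tomato") ;
+    $db->db_put("red", "apple") ;
+    $db->db_put("red", "Apple") ;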
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Minkey
+
+TODO
+
+=item -Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item -Prefix
+
+Allows you to provide a user defined prefix function.
+
+ sub prefix
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+
+=item -DupCompare
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
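+
+As an illustrative sketch (assuming C<$db> is an open B<BerkeleyDB::Btree>
+database), B<db_key_range> might be used like this:
+
+    my ($less, $equal, $greater) ;
+    my $status = $db->db_key_range("Smith", $less, $equal, $greater) ;
+    if ($status == 0)
+    {
+        print "$less of the keys sort before 'Smith', " .
+              "$equal are equal to it and $greater sort after it\n" ;
+    }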
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+    # use a negative index
+    print "The last element is $h[-1]\n" ;
+    print "The 2nd last element is $h[-2]\n" ;
+
+    untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
+you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
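+
+A sketch of how B<BerkeleyDB::Unknown> might typically be used (assuming
+F<fruit> is an existing database created by one of the earlier examples):
+open the database without specifying its type, then use B<type> to find
+out what it really is.
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $filename = "fruit" ;
+    my $db = new BerkeleyDB::Unknown
+                -Filename => $filename
+        or die "Cannot open $filename: $! $BerkeleyDB::Error\n" ;
+
+    my $type = $db->type() ;
+    print "$filename is a Hash database\n"  if $type == DB_HASH ;
+    print "$filename is a Btree database\n" if $type == DB_BTREE ;
+
+    undef $db ;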
+
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+Sets the size, in bytes, of the database cache.
+
+=item -Lorder
+
+Sets the byte order (endianness) used to store numbers in the database.
+
+=item -Pagesize
+
+Sets the size, in bytes, of the pages used to hold items in the database.
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter is used to
+specify the B<BerkeleyDB::Env> object that the database is to be opened
+within.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
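+
+For example, a sketch of using B<DB_NOOVERWRITE> to avoid replacing an
+existing entry (assuming C<$db> is an open database object):
+
+    my $status = $db->db_put("orange", "orange", DB_NOOVERWRITE) ;
+    print "orange is already in the database\n"
+        if $status == DB_KEYEXIST ;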
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are cached in memory, write them to the
+database file.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return codes are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x, this method will work with all database
+formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key.
+
+=back
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor have already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO
+
+Iterating from first to last, then in reverse.
+
+examples of each of the flags.
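+
+As a first sketch of the iteration case (assuming C<$db> is an open
+database object), a cursor can walk the database forwards with B<DB_NEXT>
+and backwards with B<DB_PREV>:
+
+    my ($k, $v) = ("", "") ;
+    my $cursor = $db->db_cursor() ;
+
+    # first to last
+    while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+      { print "$k -> $v\n" }
+
+    # last to first
+    while ($cursor->c_get($k, $v, DB_PREV) == 0)
+      { print "$k -> $v\n" }
+
+    undef $cursor ;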
+
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
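+
+Pending a full treatment, here is a minimal sketch of the transaction
+interface as listed in the synopsis. It assumes a directory F<./txnhome>
+already exists for the environment; all of the databases involved must be
+opened inside a transaction-enabled environment.
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+                 -Home  => "./txnhome",
+                 -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_MPOOL|
+                           DB_INIT_LOCK|DB_INIT_LOG
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+
+    my $db = new BerkeleyDB::Btree
+                 -Filename => "txn.db",
+                 -Flags    => DB_CREATE,
+                 -Env      => $env
+        or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    my $txn = $env->txn_begin() ;
+    $db->Txn($txn) ;               # associate the transaction with $db
+
+    $db->db_put("key", "value") ;
+
+    $txn->txn_commit() ;           # or $txn->txn_abort() to throw it away
+
+    undef $db ;
+    undef $env ;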
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter, pass C<undef> to it.
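+
+For example, to remove a previously installed store-key filter:
+
+    $db->filter_store_key(undef) ;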
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL terminations issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+        or die "Cannot open database '$filename': $!\n";
+
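+Once the hash is tied, nested Perl data structures can be stored and
+fetched as single values; MLDBM serialises them behind the scenes. A
+brief, hypothetical continuation of the fragment above:
+
+    # store a reference to a nested data structure
+    $o{"fruit"} = { apple => "red", banana => "yellow" } ;
+
+    # fetch it back and use it
+    my $ref = $o{"fruit"} ;
+    print "a banana is $ref->{banana}\n" ;
+    print Dumper($ref) ;
+
+    untie %o ;
+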
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
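+
+In outline (a sketch of the usual advice): if you keep hold of the object
+returned by C<tie>, or of any cursor created from it, make sure you
+C<undef> it B<before> calling C<untie>. Otherwise the underlying database
+is not closed and Perl will warn that inner references still exist.
+
+    my %h ;
+    my $db = tie %h, "BerkeleyDB::Hash",
+                 -Filename => $filename,
+                 -Flags    => DB_CREATE
+        or die "Cannot open $filename: $! $BerkeleyDB::Error\n" ;
+
+    # ... use %h and $db ...
+
+    undef $db ;     # drop the object (and any cursors) first
+    untie %h ;      # now the database really is closed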
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x or 3.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or 3.x.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/db/perl/BerkeleyDB/BerkeleyDB.pod.P b/db/perl/BerkeleyDB/BerkeleyDB.pod.P
new file mode 100644
index 000000000..da1c5e60a
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB.pod.P
@@ -0,0 +1,1527 @@
+=head1 NAME
+
+BerkeleyDB - Perl extension for Berkeley DB version 2 or 3
+
+=head1 SYNOPSIS
+
+ use BerkeleyDB;
+
+ $env = new BerkeleyDB::Env [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Hash', [OPTIONS] ;
+ $db = new BerkeleyDB::Hash [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Btree', [OPTIONS] ;
+ $db = new BerkeleyDB::Btree [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Recno', [OPTIONS] ;
+ $db = new BerkeleyDB::Recno [OPTIONS] ;
+
+ $db = tie %hash, 'BerkeleyDB::Queue', [OPTIONS] ;
+ $db = new BerkeleyDB::Queue [OPTIONS] ;
+
+ $db = new BerkeleyDB::Unknown [OPTIONS] ;
+
+ $status = BerkeleyDB::db_remove [OPTIONS]
+
+ $hash{$key} = $value ;
+ $value = $hash{$key} ;
+ each %hash ;
+ keys %hash ;
+ values %hash ;
+
+ $status = $db->db_get()
+ $status = $db->db_put() ;
+ $status = $db->db_del() ;
+ $status = $db->db_sync() ;
+ $status = $db->db_close() ;
+ $hash_ref = $db->db_stat() ;
+ $status = $db->db_key_range();
+ $type = $db->type() ;
+ $status = $db->status() ;
+ $boolean = $db->byteswapped() ;
+
+ ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+ ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+ $cursor = $db->db_cursor([$flags]) ;
+ $newcursor = $cursor->c_dup([$flags]);
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_put() ;
+ $status = $cursor->c_del() ;
+ $status = $cursor->c_count() ;
+ $status = $cursor->status() ;
+ $status = $cursor->c_close() ;
+
+ $cursor = $db->db_join() ;
+ $status = $cursor->c_get() ;
+ $status = $cursor->c_close() ;
+
+ $status = $env->txn_checkpoint()
+ $hash_ref = $env->txn_stat()
+ $status = $env->setmutexlocks()
+
+ $txn = $env->txn_begin() ;
+ $db->Txn($txn);
+ $txn->Txn($db1, $db2,...);
+ $status = $txn->txn_prepare()
+ $status = $txn->txn_commit()
+ $status = $txn->txn_abort()
+ $status = $txn->txn_id()
+
+ $status = $env->set_lg_dir();
+ $status = $env->set_lg_bsize();
+ $status = $env->set_lg_max();
+
+ $status = $env->set_data_dir() ;
+ $status = $env->set_tmp_dir() ;
+
+ $BerkeleyDB::Error
+ $BerkeleyDB::db_version
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ # deprecated, but supported
+ $txn_mgr = $env->TxnMgr();
+ $status = $txn_mgr->txn_checkpoint()
+ $hash_ref = $txn_mgr->txn_stat()
+ $txn = $txn_mgr->txn_begin() ;
+
+=head1 DESCRIPTION
+
+B<NOTE: This document is still under construction. Expect it to be
+incomplete in places.>
+
+This Perl module provides an interface to most of the functionality
+available in Berkeley DB versions 2 and 3. In general it is safe to assume
+that the interface provided here to be identical to the Berkeley DB
+interface. The main changes have been to make the Berkeley DB API work
+in a Perl way. Note that if you are using Berkeley DB 2.x, the new
+features available in Berkeley DB 3.x are not available via this module.
+
+The reader is expected to be familiar with the Berkeley DB
+documentation. Where the interface provided here is identical to the
+Berkeley DB library and the... TODO
+
+The B<db_appinit>, B<db_cursor>, B<db_open> and B<db_txn> man pages are
+particularly relevant.
+
+The interface to Berkeley DB is implemented with a number of Perl
+classes.
+
+=head1 ENV CLASS
+
+The B<BerkeleyDB::Env> class provides an interface to the Berkeley DB
+function B<db_appinit> in Berkeley DB 2.x or B<db_env_create> and
+B<DBENV-E<gt>open> in Berkeley DB 3.x. Its purpose is to initialise a
+number of sub-systems that can then be used in a consistent way in all
+the databases you open within the environment.
+
+If you don't intend using transactions, locking or logging, then you
+shouldn't need to make use of B<BerkeleyDB::Env>.
+
+=head2 Synopsis
+
+ $env = new BerkeleyDB::Env
+ [ -Home => $path, ]
+ [ -Server => $name, ]
+ [ -CacheSize => $number, ]
+ [ -Config => { name => value, name => value }, ]
+ [ -ErrFile => filename or filehandle, ]
+ [ -ErrPrefix => "string", ]
+ [ -Flags => number, ]
+ [ -LockDetect => number, ]
+ [ -Verbose => boolean, ]
+
+=over 5
+
+All the parameters to the BerkeleyDB::Env constructor are optional.
+
+=item -Home
+
+If present, this parameter should point to an existing directory. Any
+files that I<aren't> specified with an absolute path in the sub-systems
+that are initialised by the BerkeleyDB::Env class will be assumed to
+live in the B<Home> directory.
+
+For example, in the code fragment below the database "fred.db" will be
+opened in the directory "/home/databases" because it was specified as a
+relative path, but "joe.db" will be opened in "/other" because it was
+part of an absolute path.
+
+ $env = new BerkeleyDB::Env
+ -Home => "/home/databases"
+ ...
+
+ $db1 = new BerkeleyDB::Hash
+		-Filename => "fred.db",
+ -Env => $env
+ ...
+
+ $db2 = new BerkeleyDB::Hash
+		-Filename => "/other/joe.db",
+ -Env => $env
+ ...
+
+=item -Server
+
+If present, this parameter should be the hostname of a server that is running
+the Berkeley DB RPC server. All databases will be accessed via the RPC server.
+
+=item -Cachesize
+
+If present, this parameter sets the size of the environment's shared memory
+buffer pool.
+
+=item -Config
+
+This is a variation on the C<-Home> parameter, but it allows finer
+control of where specific types of files will be stored.
+
+The parameter expects a reference to a hash. Valid keys are:
+B<DB_DATA_DIR>, B<DB_LOG_DIR> and B<DB_TMP_DIR>
+
+The code below shows an example of how it can be used.
+
+ $env = new BerkeleyDB::Env
+ -Config => { DB_DATA_DIR => "/home/databases",
+ DB_LOG_DIR => "/home/logs",
+ DB_TMP_DIR => "/home/tmp"
+ }
+ ...
+
+=item -ErrFile
+
+Expects either the name of a file or a reference to a filehandle. Any
+errors generated internally by Berkeley DB will be logged to this file.
+
+=item -ErrPrefix
+
+Allows a prefix to be added to the error messages before they are sent
+to B<-ErrFile>.
+
+=item -Flags
+
+The B<Flags> parameter specifies both which sub-systems to initialise,
+as well as a number of environment-wide options.
+See the Berkeley DB documentation for more details of these options.
+
+Any of the following can be specified by OR'ing them:
+
+B<DB_CREATE>
+
+If any of the files specified do not already exist, create them.
+
+B<DB_INIT_CDB>
+
+Initialise the Concurrent Access Methods
+
+B<DB_INIT_LOCK>
+
+Initialise the Locking sub-system.
+
+B<DB_INIT_LOG>
+
+Initialise the Logging sub-system.
+
+B<DB_INIT_MPOOL>
+
+Initialise the shared memory buffer pool sub-system.
+
+B<DB_INIT_TXN>
+
+Initialise the transaction sub-system.
+
+B<DB_MPOOL_PRIVATE>
+
+Create a private memory pool, rather than a shared one. This flag is only
+meaningful if B<DB_INIT_MPOOL> is also specified.
+
+B<DB_NOMMAP>
+
+Copy read-only database files into the local cache instead of mapping
+them into process memory.
+
+B<DB_RECOVER>
+
+
+
+B<DB_RECOVER_FATAL>
+
+B<DB_THREAD>
+
+B<DB_TXN_NOSYNC>
+
+B<DB_USE_ENVIRON>
+
+B<DB_USE_ENVIRON_ROOT>
+
+=item -LockDetect
+
+Specifies what to do when a lock conflict occurs. The value should be one of
+
+B<DB_LOCK_DEFAULT>
+
+B<DB_LOCK_OLDEST>
+
+B<DB_LOCK_RANDOM>
+
+B<DB_LOCK_YOUNGEST>
+
+=item -Verbose
+
+Add extra debugging information to the messages sent to B<-ErrFile>.
+
+=back
+
+=head2 Methods
+
+The environment class has the following methods:
+
+=over 5
+
+=item $env->errPrefix("string") ;
+
+This method is identical to the B<-ErrPrefix> flag. It allows the
+error prefix string to be changed dynamically.
+
+=item $txn = $env->TxnMgr()
+
+Constructor for creating a B<TxnMgr> object.
+See L<"TRANSACTIONS"> for more details of using transactions.
+
+This method is deprecated. Access the transaction methods using the B<txn_>
+methods below from the environment object directly.
+
+=item $env->txn_begin()
+
+TODO
+
+=item $env->txn_stat()
+
+TODO
+
+=item $env->txn_checkpoint()
+
+TODO
+
+=item $env->status()
+
+Returns the status of the last BerkeleyDB::Env method.
+
+=item $env->setmutexlocks()
+
+Only available in Berkeley DB 3.0 or greater. Calls
+B<db_env_set_mutexlocks> when used with Berkeley DB 3.1.x. When used with
+Berkeley DB 3.0 or 3.2 and better it calls B<DBENV-E<gt>set_mutexlocks>.
+
+=back
+
+=head2 Examples
+
+TODO.
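+
+As a minimal sketch (assuming the directory F<./envhome> already exists),
+an environment with a shared memory buffer pool can be created and then
+passed to the database constructors via the C<-Env> option:
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+                  -Home  => "./envhome",
+                  -Flags => DB_CREATE|DB_INIT_MPOOL
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+
+    my $db = new BerkeleyDB::Hash
+                  -Filename => "env.db",
+                  -Flags    => DB_CREATE,
+                  -Env      => $env
+        or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    $db->db_put("hello", "world") ;
+
+    undef $db ;
+    undef $env ;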
+
+=head1 THE DATABASE CLASSES
+
+B<BerkeleyDB> supports the following database formats:
+
+=over 5
+
+=item B<BerkeleyDB::Hash>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using B<BerkeleyDB::Hash> are not compatible with any
+of the other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most applications,
+is built into BerkeleyDB. If you do need to use your own hashing algorithm
+it is possible to write your own in Perl and have B<BerkeleyDB> use
+it instead.
+
+=item B<BerkeleyDB::Btree>
+
+The Btree format allows arbitrary key/value pairs to be stored in a
+B+tree.
+
+As with the B<BerkeleyDB::Hash> format, it is possible to provide a
+user defined Perl routine to perform the comparison of keys. By default,
+though, the keys are stored in lexical order.
+
+=item B<BerkeleyDB::Recno>
+
+TODO.
+
+
+=item B<BerkeleyDB::Queue>
+
+TODO.
+
+=item B<BerkeleyDB::Unknown>
+
+This isn't a database format at all. It is used when you want to open an
+existing Berkeley DB database without having to know what type it is.
+
+=back
+
+
+Each of the database formats described above is accessed via a
+corresponding B<BerkeleyDB> class. These will be described in turn in
+the next sections.
+
+=head1 BerkeleyDB::Hash
+
+Equivalent to calling B<db_open> with type B<DB_HASH> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_HASH> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Hash
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Hash',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Hash specific
+ [ -Ffactor => number,]
+ [ -Nelem => number,]
+ [ -Hash => code reference,]
+ [ -DupCompare => code reference,]
+
+
+When the "tie" interface is used, reading from and writing to the database
+is achieved via the tied hash. In this case the database operates like
+a Perl associative array that happens to be stored on disk.
+
+In addition to the high-level tied hash interface, it is possible to
+make use of the underlying methods provided by Berkeley DB.
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Hash> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Ffactor
+
+=item -Nelem
+
+See the Berkeley DB documentation for details of these options.
+
+=item -Hash
+
+Allows you to provide a user-defined hash function. If not specified,
+a default hash function is used. Here is a template for a user-defined
+hash function:
+
+ sub hash
+ {
+ my ($data) = shift ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Hash => \&hash,
+ ...
+
+See L<""> for an example.
+
+=item -DupCompare
+
+Used in conjunction with the B<DB_DUPSORT> flag.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Property => DB_DUP|DB_DUPSORT,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+
+=head2 Methods
+
+B<BerkeleyDB::Hash> only supports the standard database methods.
+See L<COMMON DATABASE METHODS>.
+
+=head2 A Simple Tied Hash Example
+
+## simpleHash
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys retrieved from a
+Hash database come back in an apparently random order.
+
+=head2 Another Simple Hash Example
+
+This does the same as the previous example, but without using tie.
+
+## simpleHash2
+
+=head2 Duplicate keys
+
+The code below is a variation on the examples above. This time the hash has
+been inverted: the key is now the colour and the value is the fruit name.
+The B<DB_DUP> flag has been specified to allow duplicates.
+
+##dupHash
+
+Here is the output:
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> banana
+ green -> apple
+
+=head2 Sorting Duplicate Keys
+
+In the previous example, when there were duplicate keys, the values were
+stored in the order they were added to the database. The code below is
+identical to the previous example except that the B<DB_DUPSORT> flag is
+also specified.
+
+##dupSortHash
+
+Notice that in the output below the duplicate values are sorted.
+
+ orange -> orange
+ yellow -> banana
+ red -> apple
+ red -> tomato
+ green -> apple
+ green -> banana
+
+=head2 Custom Sorting Duplicate Keys
+
+Another variation
+
+TODO
+
+=head2 Changing the hash
+
+TODO
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Btree
+
+Equivalent to calling B<db_open> with type B<DB_BTREE> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_BTREE> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+
+ $db = new BerkeleyDB::Btree
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+and this
+
+ [$db =] tie %hash, 'BerkeleyDB::Btree',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Btree specific
+ [ -Minkey => number,]
+ [ -Compare => code reference,]
+ [ -DupCompare => code reference,]
+ [ -Prefix => code reference,]
+
+=head2 Options
+
+In addition to the standard set of options (see L<COMMON OPTIONS>)
+B<BerkeleyDB::Btree> supports these options:
+
+=over 5
+
+=item -Property
+
+Used to specify extra flags when opening a database. The following
+flags may be specified by logically OR'ing together one or more of the
+following values:
+
+B<DB_DUP>
+
+When creating a new database, this flag enables the storing of duplicate
+keys in the database. If B<DB_DUPSORT> is not specified as well, the
+duplicates are stored in the order they are created in the database.
+
+B<DB_DUPSORT>
+
+Enables the sorting of duplicate keys in the database. Ignored if
+B<DB_DUP> isn't also specified.
+
+=item -Minkey
+
+TODO
+
+=item -Compare
+
+Allows you to override the default sort order used in the database. See
+L<"Changing the sort order"> for an example.
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Compare => \&compare,
+ ...
+
+=item -Prefix
+
+Allows you to provide a user defined prefix function.
+
+ sub prefix
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -Prefix => \&prefix,
+ ...
+
+=item -DupCompare
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+    tie %h, "BerkeleyDB::Btree",
+ -Filename => $filename,
+ -DupCompare => \&compare,
+ ...
+
+=back
+
+=head2 Methods
+
+B<BerkeleyDB::Btree> supports the following database methods.
+See also L<COMMON DATABASE METHODS>.
+
+All the methods below return 0 to indicate success.
+
+=over 5
+
+=item $status = $db->db_key_range($key, $less, $equal, $greater [, $flags])
+
+Given a key, C<$key>, this method returns the proportion of keys less than
+C<$key> in C<$less>, the proportion equal to C<$key> in C<$equal> and the
+proportion greater than C<$key> in C<$greater>.
+
+The proportion is returned as a double in the range 0.0 to 1.0.
+
+=back
+
+=head2 A Simple Btree Example
+
+The code below is a simple example of using a btree database.
+
+## btreeSimple
+
+Here is the output from the code above. The keys have been sorted using
+Berkeley DB's default sorting algorithm.
+
+ Smith
+ Wall
+ mouse
+
+
+=head2 Changing the sort order
+
+It is possible to supply your own sorting algorithm if the one that Berkeley
+DB used isn't suitable. The code below is identical to the previous example
+except for the case insensitive compare function.
+
+## btreeSortOrder
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=back
+
+=head2 Using db_stat
+
+TODO
+
+=head1 BerkeleyDB::Recno
+
+Equivalent to calling B<db_open> with type B<DB_RECNO> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_RECNO> in
+Berkeley DB 3.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Recno
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Recno',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Recno specific
+ [ -Delim => byte,]
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -Source => filename,]
+
+=head2 A Recno Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+## simpleRecno
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head1 BerkeleyDB::Queue
+
+Equivalent to calling B<db_create> followed by B<DB-E<gt>open> with
+type B<DB_QUEUE> in Berkeley DB 3.x. This database format isn't available if
+you use Berkeley DB 2.x.
+
+Two forms of constructor are supported:
+
+ $db = new BerkeleyDB::Queue
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+ [ -ExtentSize => number, ]
+
+and this
+
+ [$db =] tie @arry, 'BerkeleyDB::Queue',
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+ # BerkeleyDB::Queue specific
+ [ -Len => number,]
+ [ -Pad => byte,]
+
+
+=head1 BerkeleyDB::Unknown
+
+This class is used to open an existing database.
+
+Equivalent to calling B<db_open> with type B<DB_UNKNOWN> in Berkeley DB 2.x and
+calling B<db_create> followed by B<DB-E<gt>open> with type B<DB_UNKNOWN> in
+Berkeley DB 3.x.
+
+The constructor looks like this:
+
+ $db = new BerkeleyDB::Unknown
+ [ -Filename => "filename", ]
+ [ -Subname => "sub-database name", ]
+ [ -Flags => flags,]
+ [ -Property => flags,]
+ [ -Mode => number,]
+ [ -Cachesize => number,]
+ [ -Lorder => number,]
+ [ -Pagesize => number,]
+ [ -Env => $env,]
+ [ -Txn => $txn,]
+
+
+=head2 An example
+
+=head1 COMMON OPTIONS
+
+All database access class constructors support the common set of
+options defined below. All are optional.
+
+=over 5
+
+=item -Filename
+
+The database filename. If no filename is specified, a temporary file will
+be created and removed once the program terminates.
+
+=item -Subname
+
+Specifies the name of the sub-database to open.
+This option is only valid if you are using Berkeley DB 3.x.
+
+=item -Flags
+
+Specify how the database will be opened/created. The valid flags are:
+
+B<DB_CREATE>
+
+Create any underlying files, as necessary. If the files do not already
+exist and the B<DB_CREATE> flag is not specified, the call will fail.
+
+B<DB_NOMMAP>
+
+Not supported by BerkeleyDB.
+
+B<DB_RDONLY>
+
+Opens the database in read-only mode.
+
+B<DB_THREAD>
+
+Not supported by BerkeleyDB.
+
+B<DB_TRUNCATE>
+
+If the database file already exists, remove all the data before
+opening it.
+
+=item -Mode
+
+Determines the file protection when the database is created. Defaults
+to 0666.
+
+=item -Cachesize
+
+Sets the size, in bytes, of the database cache.
+
+=item -Lorder
+
+Sets the byte order (endianness) used to store numbers in the database.
+
+=item -Pagesize
+
+Sets the size, in bytes, of the pages used to hold items in the database.
+
+=item -Env
+
+When working under a Berkeley DB environment, this parameter is used to
+specify the B<BerkeleyDB::Env> object that the database is to be opened
+within.
+
+Defaults to no environment.
+
+=item -Txn
+
+TODO.
+
+=back
+
+=head1 COMMON DATABASE METHODS
+
+All the database interfaces support the common set of methods defined
+below.
+
+All the methods below return 0 to indicate success.
+
+=head2 $status = $db->db_get($key, $value [, $flags])
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. If it exists, the value read from the database is
+returned in the C<$value> parameter.
+
+The B<$flags> parameter is optional. If present, it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_GET_BOTH>
+
+When the B<DB_GET_BOTH> flag is specified, B<db_get> checks for the
+existence of B<both> the C<$key> B<and> C<$value> in the database.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO
+
+=back
+
+
+=head2 $status = $db->db_put($key, $value [, $flags])
+
+Stores a key/value pair in the database.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_APPEND>
+
+This flag is only applicable when accessing a B<BerkeleyDB::Recno>
+database.
+
+TODO.
+
+
+=item B<DB_NOOVERWRITE>
+
+If this flag is specified and C<$key> already exists in the database,
+the call to B<db_put> will return B<DB_KEYEXIST>.
+
+=back
+
+=head2 $status = $db->db_del($key [, $flags])
+
+Deletes a key/value pair in the database associated with C<$key>.
+If duplicate keys are enabled in the database, B<db_del> will delete
+B<all> key/value pairs with key C<$key>.
+
+The B<$flags> parameter is optional and is currently unused.
+
+=head2 $status = $db->db_sync()
+
+If any parts of the database are cached in memory, write them to the
+database file.
+
+=head2 $cursor = $db->db_cursor([$flags])
+
+Creates a cursor object. This is used to access the contents of the
+database sequentially. See L<CURSORS> for details of the methods
+available when working with cursors.
+
+The B<$flags> parameter is optional. If present it must be set to B<one>
+of the following values:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_set($offset, $length) ;
+
+TODO
+
+=head2 ($flag, $old_offset, $old_length) = $db->partial_clear() ;
+
+TODO
+
+=head2 $db->byteswapped()
+
+TODO
+
+=head2 $db->type()
+
+Returns the type of the database. The possible return codes are B<DB_HASH>
+for a B<BerkeleyDB::Hash> database, B<DB_BTREE> for a B<BerkeleyDB::Btree>
+database and B<DB_RECNO> for a B<BerkeleyDB::Recno> database. This method
+is typically used when a database has been opened with
+B<BerkeleyDB::Unknown>.
+
+=head2 $ref = $db->db_stat()
+
+Returns a reference to an associative array containing information about
+the database. The keys of the associative array correspond directly to the
+names of the fields defined in the Berkeley DB documentation. For example,
+in the DB documentation, the field B<bt_version> stores the version of the
+Btree database. Assuming you called B<db_stat> on a Btree database the
+equivalent field would be accessed as follows:
+
+ $version = $ref->{'bt_version'} ;
+
+If you are using Berkeley DB 3.x, this method will work with all database
+formats. When DB 2.x is used, it only works with B<BerkeleyDB::Btree>.
+
+=head2 $status = $db->status()
+
+Returns the status of the last C<$db> method called.
+
+=head1 CURSORS
+
+A cursor is used whenever you want to access the contents of a database
+in sequential order.
+A cursor object is created with the C<db_cursor> method.
+
+A cursor object has the following methods available:
+
+=head2 $newcursor = $cursor->c_dup($flags)
+
+Creates a duplicate of C<$cursor>. This method needs Berkeley DB 3.0.x or better.
+
+The C<$flags> parameter is optional and can take the following value:
+
+=over 5
+
+=item DB_POSITION
+
+When present this flag will position the new cursor at the same place as the
+existing cursor.
+
+=back
+
+=head2 $status = $cursor->c_get($key, $value, $flags)
+
+Reads a key/value pair from the database, returning the data in C<$key>
+and C<$value>. The key/value pair actually read is controlled by the
+C<$flags> parameter, which can take B<one> of the following values:
+
+=over 5
+
+=item B<DB_FIRST>
+
+Set the cursor to point to the first key/value pair in the
+database. Return the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_LAST>
+
+Set the cursor to point to the last key/value pair in the database. Return
+the key/value pair in C<$key> and C<$value>.
+
+=item B<DB_NEXT>
+
+If the cursor is already pointing to a key/value pair, it will be
+incremented to point to the next key/value pair and return its contents.
+
+If the cursor isn't initialised, B<DB_NEXT> works just like B<DB_FIRST>.
+
+If the cursor is already positioned at the last key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_NEXT_DUP>
+
+This flag is only valid when duplicate keys have been enabled in
+a database.
+If the cursor is already pointing to a key/value pair and the key of
+the next key/value pair is identical, the cursor will be incremented to
+point to it and their contents returned.
+
+=item B<DB_PREV>
+
+If the cursor is already pointing to a key/value pair, it will be
+decremented to point to the previous key/value pair and return its
+contents.
+
+If the cursor isn't initialised, B<DB_PREV> works just like B<DB_LAST>.
+
+If the cursor is already positioned at the first key/value pair, B<c_get>
+will return B<DB_NOTFOUND>.
+
+=item B<DB_CURRENT>
+
+If the cursor has been set to point to a key/value pair, return their
+contents.
+If the key/value pair referenced by the cursor has been deleted, B<c_get>
+will return B<DB_KEYEMPTY>.
+
+=item B<DB_SET>
+
+Set the cursor to point to the key/value pair referenced by B<$key>
+and return the value in B<$value>.
+
+=item B<DB_SET_RANGE>
+
+This flag is a variation on the B<DB_SET> flag. As well as returning
+the value, it also returns the key, via B<$key>.
+When used with a B<BerkeleyDB::Btree> database the key matched by B<c_get>
+will be the shortest key (in length) which is greater than or equal to
+the key supplied, via B<$key>. This allows partial key searches.
+See ??? for an example of how to use this flag; a minimal sketch of a
+partial key search also appears below.
+
+=item B<DB_GET_BOTH>
+
+Another variation on B<DB_SET>. This one returns both the key and
+the value.
+
+=item B<DB_SET_RECNO>
+
+TODO.
+
+=item B<DB_GET_RECNO>
+
+TODO.
+
+=back
+
+In addition, the following value may be set by logically OR'ing it into
+the B<$flags> parameter:
+
+=over 5
+
+=item B<DB_RMW>
+
+TODO.
+
+=back
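+
+As an illustration of B<DB_SET_RANGE>, here is a minimal sketch of a
+partial key lookup (this assumes C<$db> is an open B<BerkeleyDB::Btree>
+object; the key values are illustrative only):
+
+    my $cursor = $db->db_cursor() ;
+    my ($key, $value) = ("app", "") ;
+    my $status = $cursor->c_get($key, $value, DB_SET_RANGE) ;
+    # $key now holds the first key >= "app", e.g. "apple"
+    print "found [$key] -> [$value]\n" if $status == 0 ;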
+
+=head2 $status = $cursor->c_put($key, $value, $flags)
+
+Stores the key/value pair in the database. The position that the data is
+stored in the database is controlled by the C<$flags> parameter, which
+must take B<one> of the following values:
+
+=over 5
+
+=item B<DB_AFTER>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately after the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+
+=item B<DB_BEFORE>
+
+When used with a Btree or Hash database, a duplicate of the key referenced
+by the current cursor position will be created and the contents of
+B<$value> will be associated with it - B<$key> is ignored.
+The new key/value pair will be stored immediately before the current
+cursor position.
+Obviously the database has to have been opened with B<DB_DUP>.
+
+When used with a Recno ... TODO
+
+=item B<DB_CURRENT>
+
+If the cursor has been initialised, replace the value of the key/value
+pair stored in the database with the contents of B<$value>.
+
+=item B<DB_KEYFIRST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the first entry in
+the duplicates for the particular key.
+
+=item B<DB_KEYLAST>
+
+Only valid with a Btree or Hash database. This flag is only really
+used when duplicates are enabled in the database and sorted duplicates
+haven't been specified.
+In this case the key/value pair will be inserted as the last entry in
+the duplicates for the particular key (see the sketch after this list).
+
+=back
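+
+A sketch of using B<DB_KEYLAST> to add a duplicate (this assumes C<$db>
+is an open database object and that the database was opened with the
+B<DB_DUP> property):
+
+    my $cursor = $db->db_cursor() ;
+    my $status = $cursor->c_put("colour", "red", DB_KEYLAST) ;
+    print "c_put failed: $status\n" if $status != 0 ;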
+
+=head2 $status = $cursor->c_del([$flags])
+
+This method deletes the key/value pair associated with the current cursor
+position. The cursor position will not be changed by this operation, so
+any subsequent cursor operation must first initialise the cursor to
+point to a valid key/value pair.
+
+If the key/value pair associated with the cursor has already been
+deleted, B<c_del> will return B<DB_KEYEMPTY>.
+
+The B<$flags> parameter is not used at present.
+
+=head2 $status = $cursor->c_count($cnt [, $flags])
+
+Stores the number of duplicates at the current cursor position in B<$cnt>.
+
+The B<$flags> parameter is not used at present. This method needs
+Berkeley DB 3.1 or better.
+
+=head2 $status = $cursor->status()
+
+Returns the status of the last cursor method as a dual type.
+
+=head2 Cursor Examples
+
+TODO. Examples still to be added here include iterating from first to
+last and then in reverse, plus an example for each of the C<c_get> flag
+values. A first iteration sketch follows.
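+
+A minimal sketch of iterating over a database, first forwards and then in
+reverse (assuming C<$db> is an open database object):
+
+    my $cursor = $db->db_cursor() ;
+    my ($key, $value) = ("", "") ;
+
+    # first to last
+    while ($cursor->c_get($key, $value, DB_NEXT) == 0)
+      { print "[$key] -> [$value]\n" }
+
+    # last to first
+    while ($cursor->c_get($key, $value, DB_PREV) == 0)
+      { print "[$key] -> [$value]\n" }
+
+    undef $cursor ;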
+
+=head1 JOIN
+
+Join support for BerkeleyDB is in progress. Watch this space.
+
+TODO
+
+=head1 TRANSACTIONS
+
+TODO.
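+
+In the meantime, here is a rough sketch of how transactions are typically
+used with this module (the environment flags shown assume the transaction,
+logging, locking and memory pool subsystems are all wanted; directory and
+file names are illustrative only):
+
+    use strict ;
+    use BerkeleyDB ;
+
+    my $env = new BerkeleyDB::Env
+                  -Home  => "/some/txn/home",
+                  -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_MPOOL|
+                            DB_INIT_LOCK|DB_INIT_LOG
+        or die "cannot open environment: $BerkeleyDB::Error\n" ;
+
+    my $db = new BerkeleyDB::Hash
+                  -Filename => "txn.db",
+                  -Env      => $env,
+                  -Flags    => DB_CREATE
+        or die "cannot open database: $BerkeleyDB::Error\n" ;
+
+    my $txn = $env->txn_begin() ;
+    $db->Txn($txn) ;                # associate the transaction with the db
+
+    $db->db_put("key", "value") ;
+    $txn->txn_commit() ;            # or $txn->txn_abort() to roll back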
+
+=head1 DBM Filters
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a DBM
+database. All of the database classes (BerkeleyDB::Hash,
+BerkeleyDB::Btree and BerkeleyDB::Recno) support DBM Filters.
+
+There are four methods associated with DBM Filters. All work
+identically, and each is used to install (or uninstall) a single DBM
+Filter. Each expects a single parameter, namely a reference to a sub.
+The only difference between them is the place that the filter is
+installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of these methods, from none to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database that you need
+to share with a third-party C application. The C application assumes
+that I<all> keys and values are NULL terminated. Unfortunately when
+Perl writes to DBM databases it doesn't use NULL termination, so your
+Perl application will have to manage NULL termination itself. When you
+write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+adds the terminating NULL to all keys and values whenever you write to
+the database and removes it again when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+## nullFilter
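+
+A minimal sketch of such a set of filters, assuming C<$db> is an already
+opened database object:
+
+    $db->filter_fetch_key  ( sub { s/\0$//    } ) ;
+    $db->filter_store_key  ( sub { $_ .= "\0" } ) ;
+    $db->filter_fetch_value( sub { s/\0$//    } ) ;
+    $db->filter_store_value( sub { $_ .= "\0" } ) ;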
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+## intFilter
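+
+A minimal sketch of such a pair of filters, assuming C<$db> is an already
+opened database object and that the native size of a C C<int> is what the
+C application expects:
+
+    $db->filter_fetch_key( sub { $_ = unpack("i", $_) } ) ;
+    $db->filter_store_key( sub { $_ = pack ("i", $_) } ) ;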
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 Using BerkeleyDB with MLDBM
+
+Both BerkeleyDB::Hash and BerkeleyDB::Btree can be used with the MLDBM
+module. The code fragment below shows how to associate MLDBM with
+BerkeleyDB::Btree. To use BerkeleyDB::Hash just replace
+BerkeleyDB::Btree with BerkeleyDB::Hash.
+
+ use strict ;
+ use BerkeleyDB ;
+ use MLDBM qw(BerkeleyDB::Btree) ;
+ use Data::Dumper;
+
+ my $filename = 'testmldbm' ;
+ my %o ;
+
+ unlink $filename ;
+ tie %o, 'MLDBM', -Filename => $filename,
+ -Flags => DB_CREATE
+                     or die "Cannot open database '$filename': $!\n";
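+
+Once tied, nested Perl data structures can be stored and fetched through
+the hash as usual; for example (the data is illustrative only):
+
+    $o{"fred"} = { Age => 42, Town => "Lurgan" } ;
+    my $fred = $o{"fred"} ;
+    print "Fred is $fred->{Age} years old\n" ;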
+
+See the MLDBM documentation for information on how to use the module
+and for details of its limitations.
+
+=head1 EXAMPLES
+
+TODO.
+
+=head1 HINTS & TIPS
+
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings
+are not. See L<An Example -- the NULL termination problem.> in the DBM
+FILTERS section for a generic way to work around this problem.
+
+
+=head2 The untie Gotcha
+
+TODO
+
+=head1 COMMON QUESTIONS
+
+This section attempts to answer some of the more common questions that
+I get asked.
+
+
+=head2 Relationship with DB_File
+
+Before Berkeley DB 2.x was written there was only one Perl module that
+interfaced to Berkeley DB. That module is called B<DB_File>. Although
+B<DB_File> can be built with Berkeley DB 1.x, 2.x or 3.x, it only provides
+an interface to the functionality available in Berkeley DB 1.x. That
+means that it doesn't support transactions, locking or any of the other
+new features available in DB 2.x or 3.x.
+
+=head2 How do I store Perl data structures with BerkeleyDB?
+
+See L<Using BerkeleyDB with MLDBM>.
+
+=head1 HISTORY
+
+See the Changes file.
+
+=head1 AVAILABILITY
+
+The most recent version of B<BerkeleyDB> can always be found
+on CPAN (see L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/BerkeleyDB>.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<BerkeleyDB> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of Berkeley
+ DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of BerkeleyDB.
+See L<"AUTHOR"> for details.
+
+
+=head1 AUTHOR
+
+Paul Marquess E<lt>Paul.Marquess@btinternet.comE<gt>.
+
+Questions about Berkeley DB may be addressed to E<lt>db@sleepycat.comE<gt>.
+
+=head1 SEE ALSO
+
+perl(1), DB_File, Berkeley DB.
+
+=cut
diff --git a/db/perl/BerkeleyDB/BerkeleyDB.xs b/db/perl/BerkeleyDB/BerkeleyDB.xs
new file mode 100644
index 000000000..c2230e053
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB.xs
@@ -0,0 +1,4058 @@
+/*
+
+ BerkeleyDB.xs -- Perl 5 interface to Berkeley DB version 2 & 3
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1997-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Please refer to the COPYRIGHT section in
+
+ Changes:
+ 0.01 - First Alpha Release
+ 0.02 -
+
+*/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+#define PERL_POLLUTE
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+/* Since we are building against Berkeley DB, prefer the __attribute__
+ * definition from <sys/cdefs.h> (which will shortly be #included by
+ * <db.h>) to any __attribute__ that may already be defined, for example
+ * by GCC or by Perl. */
+
+#undef __attribute__
+
+#ifndef PERL_VERSION
+# include "patchlevel.h"
+# define PERL_REVISION 5
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+#endif
+
+#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
+
+# define PL_sv_undef sv_undef
+# define PL_na na
+# define PL_dirty dirty
+
+#endif
+
+#ifdef USE_PERLIO
+# define GetFILEptr(sv) PerlIO_findFILE(IoOFP(sv_2io(sv)))
+#else
+# define GetFILEptr(sv) IoOFP(sv_2io(sv))
+#endif
+
+#include <db.h>
+
+#if (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 0)
+# define IS_DB_3_0_x
+#endif
+
+#if DB_VERSION_MAJOR >= 3
+# define AT_LEAST_DB_3
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 1)
+# define AT_LEAST_DB_3_1
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || \
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR > 2) ||\
+ (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR == 2 && DB_VERSION_PATCH >= 6)
+# define AT_LEAST_DB_3_2_6
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 3)
+# define AT_LEAST_DB_3_3
+#endif
+
+/* need to define DEFSV & SAVE_DEFSV for older version of Perl */
+#ifndef DEFSV
+# define DEFSV GvSV(defgv)
+#endif
+
+#ifndef SAVE_DEFSV
+# define SAVE_DEFSV SAVESPTR(GvSV(defgv))
+#endif
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef dTHR
+# define dTHR
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(a,b) newSVpv(a,b)
+#endif
+
+#ifndef PTR2IV
+# define PTR2IV(d) (IV)(d)
+#endif /* PTR2IV */
+
+#ifndef INT2PTR
+# define INT2PTR(any,d) (any)(d)
+#endif /* INT2PTR */
+
+#ifdef __cplusplus
+}
+#endif
+
+#define DBM_FILTERING
+#define STRICT_CLOSE
+/* #define ALLOW_RECNO_OFFSET */
+/* #define TRACE */
+
+#if DB_VERSION_MAJOR == 2 && ! defined(DB_LOCK_DEADLOCK)
+# define DB_LOCK_DEADLOCK EAGAIN
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#if DB_VERSION_MAJOR == 2
+# define DB_QUEUE 4
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef AT_LEAST_DB_3_2
+# define DB_callback DB * db,
+#else
+# define DB_callback
+#endif
+
+#if DB_VERSION_MAJOR > 2
+typedef struct {
+ int db_lorder;
+ size_t db_cachesize;
+ size_t db_pagesize;
+
+
+ void *(*db_malloc) __P((size_t));
+ int (*dup_compare)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t bt_maxkey;
+ u_int32_t bt_minkey;
+ int (*bt_compare)
+ __P((DB_callback const DBT *, const DBT *));
+ size_t (*bt_prefix)
+ __P((DB_callback const DBT *, const DBT *));
+
+ u_int32_t h_ffactor;
+ u_int32_t h_nelem;
+ u_int32_t (*h_hash)
+ __P((DB_callback const void *, u_int32_t));
+
+ int re_pad;
+ int re_delim;
+ u_int32_t re_len;
+ char *re_source;
+
+#define DB_DELIMITER 0x0001
+#define DB_FIXEDLEN 0x0008
+#define DB_PAD 0x0010
+ u_int32_t flags;
+ u_int32_t q_extentsize;
+} DB_INFO ;
+
+#endif /* DB_VERSION_MAJOR > 2 */
+
+typedef struct {
+ int Status ;
+ /* char ErrBuff[1000] ; */
+ SV * ErrPrefix ;
+ SV * ErrHandle ;
+ DB_ENV * Env ;
+ int open_dbs ;
+ int TxnMgrStatus ;
+ int active ;
+ bool txn_enabled ;
+ } BerkeleyDB_ENV_type ;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ BerkeleyDB_ENV_type * parent_env ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ int open_cursors ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_type;
+
+
+typedef struct {
+ DBTYPE type ;
+ bool recno_or_queue ;
+ char * filename ;
+ DB * dbp ;
+ SV * compare ;
+ SV * dup_compare ;
+ SV * prefix ;
+ SV * hash ;
+ int Status ;
+ DB_INFO * info ;
+ DBC * cursor ;
+ DB_TXN * txn ;
+ BerkeleyDB_type * parent_db ;
+ u_int32_t partial ;
+ u_int32_t dlen ;
+ u_int32_t doff ;
+ int active ;
+#ifdef ALLOW_RECNO_OFFSET
+ int array_base ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif
+ } BerkeleyDB_Cursor_type;
+
+typedef struct {
+ BerkeleyDB_ENV_type * env ;
+ } BerkeleyDB_TxnMgr_type ;
+
+#if 1
+typedef struct {
+ int Status ;
+ DB_TXN * txn ;
+ int active ;
+ } BerkeleyDB_Txn_type ;
+#else
+typedef DB_TXN BerkeleyDB_Txn_type ;
+#endif
+
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Raw ;
+typedef BerkeleyDB_ENV_type * BerkeleyDB__Env__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB ;
+typedef void * BerkeleyDB__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Common__Inner ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash ;
+typedef BerkeleyDB_type * BerkeleyDB__Hash__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree ;
+typedef BerkeleyDB_type * BerkeleyDB__Btree__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno ;
+typedef BerkeleyDB_type * BerkeleyDB__Recno__Raw ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue ;
+typedef BerkeleyDB_type * BerkeleyDB__Queue__Raw ;
+typedef BerkeleyDB_Cursor_type BerkeleyDB__Cursor_type ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor ;
+typedef BerkeleyDB_Cursor_type * BerkeleyDB__Cursor__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Raw ;
+typedef BerkeleyDB_TxnMgr_type * BerkeleyDB__TxnMgr__Inner ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Raw ;
+typedef BerkeleyDB_Txn_type * BerkeleyDB__Txn__Inner ;
+#if 0
+typedef DB_LOG * BerkeleyDB__Log ;
+typedef DB_LOCKTAB * BerkeleyDB__Lock ;
+#endif
+typedef DBT DBTKEY ;
+typedef DBT DBT_OPT ;
+typedef DBT DBT_B ;
+typedef DBT DBTKEY_B ;
+typedef DBT DBTVALUE ;
+typedef void * PV_or_NULL ;
+typedef PerlIO * IO_or_NULL ;
+typedef int DualType ;
+
+static void
+hash_delete(char * hash, char * key);
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+#ifdef ALLOW_RECNO_OFFSET
+# define RECNO_BASE db->array_base
+#else
+# define RECNO_BASE 1
+#endif
+
+#if DB_VERSION_MAJOR == 2
+# define flagSet_DB2(i, f) i |= f
+#else
+# define flagSet_DB2(i, f)
+#endif
+
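+/* flagSet() tests whether the local variable "flags" (in the enclosing
+ * XSUB) carries the given operation flag.  From DB 2.5 onwards the
+ * operation code lives in the bits covered by DB_OPFLAGS_MASK, so that
+ * field has to be compared exactly rather than simply ANDed. */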
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(bitmask) (flags & (bitmask))
+#else
+# define flagSet(bitmask) ((flags & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#ifdef DBM_FILTERING
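+/* ckFilter() runs the installed DBM filter (if any) held in db->"type" on
+ * "arg": the value is copied into $_, the filter sub is called for its
+ * side effects on $_, and the (possibly modified) $_ is copied back into
+ * "arg".  The "filtering" flag traps a filter recursively invoking
+ * another filter. */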
+#define ckFilter(arg,type,name) \
+ if (db->type) { \
+ SV * save_defsv ; \
+ /* printf("filtering %s\n", name) ;*/ \
+ if (db->filtering) \
+ softCrash("recursion detected in %s", name) ; \
+ db->filtering = TRUE ; \
+ save_defsv = newSVsv(DEFSV) ; \
+ sv_setsv(DEFSV, arg) ; \
+ PUSHMARK(sp) ; \
+ (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
+ sv_setsv(arg, DEFSV) ; \
+ sv_setsv(DEFSV, save_defsv) ; \
+ SvREFCNT_dec(save_defsv) ; \
+ db->filtering = FALSE ; \
+ /*printf("end of filtering %s\n", name) ;*/ \
+ }
+#else
+#define ckFilter(type, sv, name)
+#endif
+
+#define ERR_BUFF "BerkeleyDB::Error"
+
+#define ZMALLOC(to, typ) ((to = (typ *)safemalloc(sizeof(typ))), \
+ Zero(to,1,typ))
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#if 1
+#define getInnerObject(x) (*av_fetch((AV*)SvRV(x), 0, FALSE))
+#else
+#define getInnerObject(x) ((SV*)SvRV(sv))
+#endif
+
+#define my_sv_setpvn(sv, d, s) (s ? sv_setpvn(sv, d, s) : sv_setpv(sv, "") )
+
+#define SetValue_iv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = SvIV(sv)
+#define SetValue_io(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = GetFILEptr(sv)
+#define SetValue_sv(i, k) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = sv
+#define SetValue_pv(i, k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPV(sv,PL_na)
+#define SetValue_pvx(i, k, t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) \
+ i = (t)SvPVX(sv)
+#define SetValue_ov(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(getInnerObject(sv)) ; \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovx(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ HV * hv = (HV *)GetInternalObject(sv); \
+ SV ** svp = hv_fetch(hv, "db", 2, FALSE);\
+ IV tmp = SvIV(*svp); \
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define SetValue_ovX(i,k,t) if ((sv = readHash(hash, k)) && sv != &PL_sv_undef) {\
+ IV tmp = SvIV(GetInternalObject(sv));\
+ i = INT2PTR(t, tmp) ; \
+ }
+
+#define LastDBerror DB_RUNRECOVERY
+
+#define setDUALerrno(var, err) \
+ sv_setnv(var, (double)err) ; \
+ sv_setpv(var, ((err) ? db_strerror(err) : "")) ;\
+ SvNOK_on(var);
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
+#define OutputValue_B(arg, name) \
+ { if (RETVAL == 0) { \
+ if (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO)){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ } \
+ ckFilter(arg, filter_fetch_value, "filter_fetch_value"); \
+ } \
+ }
+
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (!db->recno_or_queue) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - RECNO_BASE); \
+ ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define OutputKey_B(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->recno_or_queue || \
+ (db->type == DB_BTREE && \
+ flagSet(DB_GET_RECNO))){ \
+ sv_setiv(arg, (I32)(*(I32*)name.data) - RECNO_BASE); \
+ } \
+ else { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ ckFilter(arg, filter_fetch_key, "filter_fetch_key") ; \
+ } \
+ }
+
+#define SetPartial(data,db) \
+ data.flags = db->partial ; \
+ data.dlen = db->dlen ; \
+ data.doff = db->doff ;
+
+#define ckActive(active, type) \
+ { \
+ if (!active) \
+ softCrash("%s is already closed", type) ; \
+ }
+
+#define ckActive_Environment(a) ckActive(a, "Environment")
+#define ckActive_TxnMgr(a) ckActive(a, "Transaction Manager")
+#define ckActive_Transaction(a) ckActive(a, "Transaction")
+#define ckActive_Database(a) ckActive(a, "Database")
+#define ckActive_Cursor(a) ckActive(a, "Cursor")
+
+/* Internal Global Data */
+static db_recno_t Value ;
+static db_recno_t zero = 0 ;
+static BerkeleyDB CurrentDB ;
+static DBTKEY empty ;
+#if 0
+static char ErrBuff[1000] ;
+#endif
+
+#ifdef AT_LEAST_DB_3_3
+# if PERL_REVISION == 5 && PERL_VERSION <= 4
+
+/* saferealloc in perl5.004 will croak if it is given a NULL pointer */
+void *
+MyRealloc(void * ptr, size_t size)
+{
+ if (ptr == NULL )
+ return safemalloc(size) ;
+ else
+ return saferealloc(ptr, size) ;
+}
+
+# else
+# define MyRealloc saferealloc
+# endif
+#endif
+
+static char *
+my_strdup(const char *s)
+{
+ if (s == NULL)
+ return NULL ;
+
+ {
+	MEM_SIZE l = strlen(s) + 1;	/* include the trailing NUL */
+ char *s1 = (char *)safemalloc(l);
+
+ Copy(s, s1, (MEM_SIZE)l, char);
+ return s1;
+ }
+}
+
+#if DB_VERSION_MAJOR == 2
+static char *
+db_strerror(int err)
+{
+ if (err == 0)
+ return "" ;
+
+ if (err > 0)
+ return Strerror(err) ;
+
+ switch (err) {
+ case DB_INCOMPLETE:
+ return ("DB_INCOMPLETE: Sync was unable to complete");
+ case DB_KEYEMPTY:
+ return ("DB_KEYEMPTY: Non-existent key/data pair");
+ case DB_KEYEXIST:
+ return ("DB_KEYEXIST: Key/data pair already exists");
+ case DB_LOCK_DEADLOCK:
+ return (
+ "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock");
+ case DB_LOCK_NOTGRANTED:
+ return ("DB_LOCK_NOTGRANTED: Lock not granted");
+ case DB_LOCK_NOTHELD:
+ return ("DB_LOCK_NOTHELD: Lock not held by locker");
+ case DB_NOTFOUND:
+ return ("DB_NOTFOUND: No matching key/data pair found");
+ case DB_RUNRECOVERY:
+ return ("DB_RUNRECOVERY: Fatal error, run database recovery");
+ default:
+ return "Unknown Error" ;
+
+ }
+}
+#endif /* DB_VERSION_MAJOR == 2 */
+
+#ifdef TRACE
+#if DB_VERSION_MAJOR > 2
+static char *
+my_db_strerror(int err)
+{
+ static char buffer[1000] ;
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ sprintf(buffer, "%d: %s", err, db_strerror(err)) ;
+ if (err && sv) {
+ strcat(buffer, ", ") ;
+ strcat(buffer, SvPVX(sv)) ;
+ }
+ return buffer;
+}
+#endif
+#endif
+
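+/* Abort any still-active transactions, then close all open cursors,
+ * databases and environments, in that order, using the registry hashes
+ * kept under the BerkeleyDB::Term:: namespace. */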
+static void
+close_everything(void)
+{
+ dTHR;
+ Trace(("close_everything\n")) ;
+ /* Abort All Transactions */
+ {
+ BerkeleyDB__Txn__Raw tid ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Txn", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_txns dirty=%d\n", PL_dirty)) ;
+ while ( (he = hv_iternext(hv)) ) {
+ tid = * (BerkeleyDB__Txn__Raw *) hv_iterkey(he, &len) ;
+ Trace((" Aborting Transaction [%d] in [%d] Active [%d]\n", tid->txn, tid, tid->active));
+ if (tid->active) {
+ txn_abort(tid->txn);
+ ++ closed ;
+ }
+ tid->active = FALSE ;
+ ++ all ;
+ }
+	Trace(("End of BerkeleyDB::Term::close_all_txns aborted %d of %d transactions\n",closed, all)) ;
+ }
+
+ /* Close All Cursors */
+ {
+ BerkeleyDB__Cursor db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Cursor", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void) hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_cursors \n")) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB__Cursor*) hv_iterkey(he, &len) ;
+ Trace((" Closing Cursor [%d] in [%d] Active [%d]\n", db->cursor, db, db->active));
+ if (db->active) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_cursors closed %d of %d cursors\n",closed, all)) ;
+ }
+
+ /* Close All Databases */
+ {
+ BerkeleyDB db ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Db", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_dbs\n" )) ;
+ while ( (he = hv_iternext(hv)) ) {
+ db = * (BerkeleyDB*) hv_iterkey(he, &len) ;
+ Trace((" Closing Database [%d] in [%d] Active [%d]\n", db->dbp, db, db->active));
+ if (db->active) {
+ (db->dbp->close)(db->dbp, 0) ;
+ ++ closed ;
+ }
+ db->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_dbs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ /* Close All Environments */
+ {
+ BerkeleyDB__Env env ;
+ HE * he ;
+ I32 len ;
+ HV * hv = perl_get_hv("BerkeleyDB::Term::Env", TRUE);
+ int all = 0 ;
+ int closed = 0 ;
+ (void)hv_iterinit(hv) ;
+ Trace(("BerkeleyDB::Term::close_all_envs\n")) ;
+ while ( (he = hv_iternext(hv)) ) {
+ env = * (BerkeleyDB__Env*) hv_iterkey(he, &len) ;
+ Trace((" Closing Environment [%d] in [%d] Active [%d]\n", env->Env, env, env->active));
+ if (env->active) {
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ ++ closed ;
+ }
+ env->active = FALSE ;
+ ++ all ;
+ }
+ Trace(("End of BerkeleyDB::Term::close_all_envs closed %d of %d dbs\n",closed, all)) ;
+ }
+
+ Trace(("end close_everything\n")) ;
+
+}
+
+static void
+destroyDB(BerkeleyDB db)
+{
+ dTHR;
+ if (! PL_dirty && db->active) {
+ -- db->open_cursors ;
+ ((db->dbp)->close)(db->dbp, 0) ;
+ }
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->dup_compare)
+ SvREFCNT_dec(db->dup_compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+#endif
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+ if (db->filename)
+ Safefree(db->filename) ;
+ Safefree(db) ;
+}
+
+static void
+softCrash(const char *pat, ...)
+{
+ char buffer1 [500] ;
+ char buffer2 [500] ;
+ va_list args;
+ va_start(args, pat);
+
+ Trace(("softCrash: %s\n", pat)) ;
+
+#define ABORT_PREFIX "BerkeleyDB Aborting: "
+
+ /* buffer = (char*) safemalloc(strlen(pat) + strlen(ABORT_PREFIX) + 1) ; */
+ strcpy(buffer1, ABORT_PREFIX) ;
+ strcat(buffer1, pat) ;
+
+ vsprintf(buffer2, buffer1, args) ;
+
+ croak(buffer2);
+
+ /* NOTREACHED */
+ va_end(args);
+}
+
+
+static I32
+GetArrayLength(BerkeleyDB db)
+{
+ DBT key ;
+ DBT value ;
+ int RETVAL = 0 ;
+ DBC * cursor ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor) == 0 )
+#else
+ if ( ((db->dbp)->cursor)(db->dbp, db->txn, &cursor, 0) == 0 )
+#endif
+ {
+ RETVAL = cursor->c_get(cursor, &key, &value, DB_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+ cursor->c_close(cursor) ;
+ }
+
+ Trace(("GetArrayLength got %d\n", RETVAL)) ;
+ return ((I32)RETVAL) ;
+}
+
+#if 0
+
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ Trace(("GetRecnoKey start value = %d\n", value)) ;
+ if (db->recno_or_queue && value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + RECNO_BASE <= 0)
+ softCrash("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+ value = length + value + RECNO_BASE ;
+ }
+ else
+ ++ value ;
+
+ Trace(("GetRecnoKey end value = %d\n", value)) ;
+
+ return value ;
+}
+
+#else /* ! 0 */
+
+#if 0
+#ifdef ALLOW_RECNO_OFFSET
+#define GetRecnoKey(db, value) _GetRecnoKey(db, value)
+
+static db_recno_t
+_GetRecnoKey(BerkeleyDB db, I32 value)
+{
+ if (value + RECNO_BASE < 1)
+ softCrash("key value %d < base (%d)", (value), RECNO_BASE?0:1) ;
+ return value + RECNO_BASE ;
+}
+
+#else
+#endif /* ALLOW_RECNO_OFFSET */
+#endif /* 0 */
+
+#define GetRecnoKey(db, value) ((value) + RECNO_BASE )
+
+#endif /* 0 */
+
+#if 0
+static SV *
+GetInternalObject(SV * sv)
+{
+ SV * info = (SV*) NULL ;
+ SV * s ;
+ MAGIC * mg ;
+
+ Trace(("in GetInternalObject %d\n", sv)) ;
+ if (sv == NULL || !SvROK(sv))
+ return NULL ;
+
+ s = SvRV(sv) ;
+ if (SvMAGICAL(s))
+ {
+ if (SvTYPE(s) == SVt_PVHV || SvTYPE(s) == SVt_PVAV)
+ mg = mg_find(s, 'P') ;
+ else
+ mg = mg_find(s, 'q') ;
+
+ /* all this testing is probably overkill, but till I know more
+ about global destruction it stays.
+ */
+ /* if (mg && mg->mg_obj && SvRV(mg->mg_obj) && SvPVX(SvRV(mg->mg_obj))) */
+ if (mg && mg->mg_obj && SvRV(mg->mg_obj) )
+ info = SvRV(mg->mg_obj) ;
+ else
+ info = s ;
+ }
+
+ Trace(("end of GetInternalObject %d\n", info)) ;
+ return info ;
+}
+#endif
+
+static int
+btree_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("in btree_compare - expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static int
+dup_compare(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ Trace(("In dup_compare \n")) ;
+ if (!CurrentDB)
+ softCrash("Internal Error - No CurrentDB in dup_compare") ;
+ if (CurrentDB->dup_compare == NULL)
+ softCrash("in dup_compare: no callback specified for database '%s'", CurrentDB->filename) ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->dup_compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("dup_compare: expected 1 return value from compare sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static size_t
+btree_prefix(DB_callback const DBT * key1, const DBT * key2 )
+{
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = (char*) key1->data ;
+ data2 = (char*) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("btree_prefix: expected 1 return value from prefix sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+static u_int32_t
+hash_cb(DB_callback const void * data, u_int32_t size)
+{
+ dSP ;
+ int retval ;
+ int count ;
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ softCrash ("hash_cb: expected 1 return value from hash sub, got %d", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+static void
+db_errcall_cb(const char * db_errpfx, char * buffer)
+{
+#if 0
+
+ if (db_errpfx == NULL)
+ db_errpfx = "" ;
+ if (buffer == NULL )
+ buffer = "" ;
+ ErrBuff[0] = '\0';
+ if (strlen(db_errpfx) + strlen(buffer) + 3 <= 1000) {
+ if (*db_errpfx != '\0') {
+ strcat(ErrBuff, db_errpfx) ;
+ strcat(ErrBuff, ": ") ;
+ }
+ strcat(ErrBuff, buffer) ;
+ }
+
+#endif
+
+ SV * sv = perl_get_sv(ERR_BUFF, FALSE) ;
+ if (sv) {
+ if (db_errpfx)
+ sv_setpvf(sv, "%s: %s", db_errpfx, buffer) ;
+ else
+ sv_setpv(sv, buffer) ;
+ }
+}
+
+static SV *
+readHash(HV * hash, char * key)
+{
+ SV ** svp;
+ svp = hv_fetch(hash, key, strlen(key), FALSE);
+ if (svp && SvOK(*svp))
+ return *svp ;
+ return NULL ;
+}
+
+static void
+hash_delete(char * hash, char * key)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ (void) hv_delete(hv, (char*)&key, sizeof(key), G_DISCARD);
+}
+
+static void
+hash_store_iv(char * hash, char * key, IV value)
+{
+ HV * hv = perl_get_hv(hash, TRUE);
+ (void)hv_store(hv, (char*)&key, sizeof(key), newSViv(value), 0);
+ /* printf("hv_store returned %d\n", ret) ; */
+}
+
+static void
+hv_store_iv(HV * hash, char * key, IV value)
+{
+ hv_store(hash, key, strlen(key), newSViv(value), 0);
+}
+
+static BerkeleyDB
+my_db_open(
+ BerkeleyDB db ,
+ SV * ref,
+ SV * ref_dbenv ,
+ BerkeleyDB__Env dbenv ,
+ const char * file,
+ const char * subname,
+ DBTYPE type,
+ int flags,
+ int mode,
+ DB_INFO * info
+ )
+{
+ DB_ENV * env = NULL ;
+ BerkeleyDB RETVAL = NULL ;
+ DB * dbp ;
+ int Status ;
+
+ Trace(("_db_open(dbenv[%lu] ref_dbenv [%lu] file[%s] subname [%s] type[%d] flags[%d] mode[%d]\n",
+ dbenv, ref_dbenv, file, subname, type, flags, mode)) ;
+
+ CurrentDB = db ;
+ if (dbenv)
+ env = dbenv->Env ;
+
+#if DB_VERSION_MAJOR == 2
+ if (subname)
+ softCrash("Subname needs Berkeley DB 3 or better") ;
+#endif
+
+#if DB_VERSION_MAJOR > 2
+ Status = db_create(&dbp, env, 0) ;
+ Trace(("db_create returned %s\n", my_db_strerror(Status))) ;
+ if (Status)
+ return RETVAL ;
+
+ if (info->re_source) {
+ Status = dbp->set_re_source(dbp, info->re_source) ;
+ Trace(("set_re_source [%s] returned %s\n",
+ info->re_source, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_cachesize) {
+ Status = dbp->set_cachesize(dbp, 0, info->db_cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ info->db_cachesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_lorder) {
+ Status = dbp->set_lorder(dbp, info->db_lorder) ;
+ Trace(("set_lorder [%d] returned %s\n",
+ info->db_lorder, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->db_pagesize) {
+ Status = dbp->set_pagesize(dbp, info->db_pagesize) ;
+ Trace(("set_pagesize [%d] returned %s\n",
+ info->db_pagesize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_ffactor) {
+ Status = dbp->set_h_ffactor(dbp, info->h_ffactor) ;
+ Trace(("set_h_ffactor [%d] returned %s\n",
+ info->h_ffactor, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_nelem) {
+ Status = dbp->set_h_nelem(dbp, info->h_nelem) ;
+ Trace(("set_h_nelem [%d] returned %s\n",
+ info->h_nelem, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_minkey) {
+ Status = dbp->set_bt_minkey(dbp, info->bt_minkey) ;
+ Trace(("set_bt_minkey [%d] returned %s\n",
+ info->bt_minkey, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_compare) {
+ Status = dbp->set_bt_compare(dbp, info->bt_compare) ;
+ Trace(("set_bt_compare [%d] returned %s\n",
+ info->bt_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->h_hash) {
+ Status = dbp->set_h_hash(dbp, info->h_hash) ;
+ Trace(("set_h_hash [%d] returned %s\n",
+ info->h_hash, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->dup_compare) {
+ Status = dbp->set_dup_compare(dbp, info->dup_compare) ;
+ Trace(("set_dup_compare [%d] returned %s\n",
+ info->dup_compare, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->bt_prefix) {
+ Status = dbp->set_bt_prefix(dbp, info->bt_prefix) ;
+ Trace(("set_bt_prefix [%d] returned %s\n",
+ info->bt_prefix, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_len) {
+ Status = dbp->set_re_len(dbp, info->re_len) ;
+ Trace(("set_re_len [%d] returned %s\n",
+ info->re_len, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_delim) {
+ Status = dbp->set_re_delim(dbp, info->re_delim) ;
+ Trace(("set_re_delim [%d] returned %s\n",
+ info->re_delim, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->re_pad) {
+ Status = dbp->set_re_pad(dbp, info->re_pad) ;
+ Trace(("set_re_pad [%d] returned %s\n",
+ info->re_pad, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->flags) {
+ Status = dbp->set_flags(dbp, info->flags) ;
+ Trace(("set_flags [%d] returned %s\n",
+ info->flags, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+ }
+
+ if (info->q_extentsize) {
+#ifdef AT_LEAST_DB_3_2
+ Status = dbp->set_q_extentsize(dbp, info->q_extentsize) ;
+	    Trace(("set_q_extentsize [%d] returned %s\n",
+			info->q_extentsize, my_db_strerror(Status)));
+ if (Status)
+ return RETVAL ;
+#else
+ softCrash("-ExtentSize needs at least Berkeley DB 3.2.x") ;
+#endif
+ }
+
+ if ((Status = (dbp->open)(dbp, file, subname, type, flags, mode)) == 0) {
+#else /* DB_VERSION_MAJOR == 2 */
+ if ((Status = db_open(file, type, flags, mode, env, info, &dbp)) == 0) {
+#endif /* DB_VERSION_MAJOR == 2 */
+
+ Trace(("db_opened\n"));
+ RETVAL = db ;
+#ifdef AT_LEAST_DB_3_3
+ dbp->set_alloc(dbp, safemalloc, MyRealloc, safefree) ;
+#endif
+ RETVAL->dbp = dbp ;
+#if DB_VERSION_MAJOR == 2
+ RETVAL->type = dbp->type ;
+#else /* DB_VERSION_MAJOR > 2 */
+#ifdef AT_LEAST_DB_3_3
+ dbp->get_type(dbp, &RETVAL->type) ;
+#else /* DB 3.0 -> 3.2 */
+ RETVAL->type = dbp->get_type(dbp) ;
+#endif
+#endif /* DB_VERSION_MAJOR > 2 */
+ RETVAL->recno_or_queue = (RETVAL->type == DB_RECNO ||
+ RETVAL->type == DB_QUEUE) ;
+ RETVAL->filename = my_strdup(file) ;
+ RETVAL->Status = Status ;
+ RETVAL->active = TRUE ;
+ hash_store_iv("BerkeleyDB::Term::Db", (char *)RETVAL, 1) ;
+ Trace((" storing %d %d in BerkeleyDB::Term::Db\n", RETVAL, dbp)) ;
+ if (dbenv) {
+ RETVAL->parent_env = dbenv ;
+ dbenv->Status = Status ;
+ ++ dbenv->open_dbs ;
+ }
+ }
+ else {
+#if DB_VERSION_MAJOR > 2
+ (dbp->close)(dbp, 0) ;
+#endif
+ destroyDB(db) ;
+ Trace(("db open returned %s\n", my_db_strerror(Status))) ;
+ }
+
+ return RETVAL ;
+}
+
+static double
+constant(char * name, int arg)
+{
+ errno = 0;
+ switch (*name) {
+ case 'A':
+ break;
+ case 'B':
+ break;
+ case 'C':
+ break;
+ case 'D':
+ if (strEQ(name, "DB_AFTER"))
+#ifdef DB_AFTER
+ return DB_AFTER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_APPEND"))
+#ifdef DB_APPEND
+ return DB_APPEND;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_ABS"))
+#ifdef DB_ARCH_ABS
+ return DB_ARCH_ABS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_DATA"))
+#ifdef DB_ARCH_DATA
+ return DB_ARCH_DATA;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ARCH_LOG"))
+#ifdef DB_ARCH_LOG
+ return DB_ARCH_LOG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BEFORE"))
+#ifdef DB_BEFORE
+ return DB_BEFORE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREE"))
+ return DB_BTREE;
+ if (strEQ(name, "DB_BTREEMAGIC"))
+#ifdef DB_BTREEMAGIC
+ return DB_BTREEMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREEOLDVER"))
+#ifdef DB_BTREEOLDVER
+ return DB_BTREEOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_BTREEVERSION"))
+#ifdef DB_BTREEVERSION
+ return DB_BTREEVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CHECKPOINT"))
+#ifdef DB_CHECKPOINT
+ return DB_CHECKPOINT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CONSUME"))
+#ifdef DB_CONSUME
+ return DB_CONSUME;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CONSUME_WAIT"))
+#ifdef DB_CONSUME_WAIT
+ return DB_CONSUME_WAIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CREATE"))
+#ifdef DB_CREATE
+ return DB_CREATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CURLSN"))
+#ifdef DB_CURLSN
+ return DB_CURLSN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_CURRENT"))
+#ifdef DB_CURRENT
+ return DB_CURRENT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_MALLOC"))
+#ifdef DB_DBT_MALLOC
+ return DB_DBT_MALLOC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_PARTIAL"))
+#ifdef DB_DBT_PARTIAL
+ return DB_DBT_PARTIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DBT_USERMEM"))
+#ifdef DB_DBT_USERMEM
+ return DB_DBT_USERMEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DELETED"))
+#ifdef DB_DELETED
+ return DB_DELETED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DELIMITER"))
+#ifdef DB_DELIMITER
+ return DB_DELIMITER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DUP"))
+#ifdef DB_DUP
+ return DB_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_DUPSORT"))
+#ifdef DB_DUPSORT
+ return DB_DUPSORT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_APPINIT"))
+#ifdef DB_ENV_APPINIT
+ return DB_ENV_APPINIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_STANDALONE"))
+#ifdef DB_ENV_STANDALONE
+ return DB_ENV_STANDALONE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_ENV_THREAD"))
+#ifdef DB_ENV_THREAD
+ return DB_ENV_THREAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_EXCL"))
+#ifdef DB_EXCL
+ return DB_EXCL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FILE_ID_LEN"))
+#ifdef DB_FILE_ID_LEN
+ return DB_FILE_ID_LEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FIRST"))
+#ifdef DB_FIRST
+ return DB_FIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FIXEDLEN"))
+#ifdef DB_FIXEDLEN
+ return DB_FIXEDLEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FLUSH"))
+#ifdef DB_FLUSH
+ return DB_FLUSH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_FORCE"))
+#ifdef DB_FORCE
+ return DB_FORCE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_GET_BOTH"))
+#ifdef DB_GET_BOTH
+ return DB_GET_BOTH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_GET_RECNO"))
+#ifdef DB_GET_RECNO
+ return DB_GET_RECNO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASH"))
+ return DB_HASH;
+ if (strEQ(name, "DB_HASHMAGIC"))
+#ifdef DB_HASHMAGIC
+ return DB_HASHMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASHOLDVER"))
+#ifdef DB_HASHOLDVER
+ return DB_HASHOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_HASHVERSION"))
+#ifdef DB_HASHVERSION
+ return DB_HASHVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INCOMPLETE"))
+#ifdef DB_INCOMPLETE
+ return DB_INCOMPLETE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_CDB"))
+#ifdef DB_INIT_CDB
+ return DB_INIT_CDB;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_LOCK"))
+#ifdef DB_INIT_LOCK
+ return DB_INIT_LOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_LOG"))
+#ifdef DB_INIT_LOG
+ return DB_INIT_LOG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_MPOOL"))
+#ifdef DB_INIT_MPOOL
+ return DB_INIT_MPOOL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_INIT_TXN"))
+#ifdef DB_INIT_TXN
+ return DB_INIT_TXN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_JOIN_ITEM"))
+#ifdef DB_JOIN_ITEM
+ return DB_JOIN_ITEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_JOINENV"))
+#ifdef DB_JOINENV
+ return DB_JOINENV;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYEMPTY"))
+#ifdef DB_KEYEMPTY
+ return DB_KEYEMPTY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYEXIST"))
+#ifdef DB_KEYEXIST
+ return DB_KEYEXIST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYFIRST"))
+#ifdef DB_KEYFIRST
+ return DB_KEYFIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_KEYLAST"))
+#ifdef DB_KEYLAST
+ return DB_KEYLAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LAST"))
+#ifdef DB_LAST
+ return DB_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCKMAGIC"))
+#ifdef DB_LOCKMAGIC
+ return DB_LOCKMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCKVERSION"))
+#ifdef DB_LOCKVERSION
+ return DB_LOCKVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_CONFLICT"))
+#ifdef DB_LOCK_CONFLICT
+ return DB_LOCK_CONFLICT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_DEADLOCK"))
+#ifdef DB_LOCK_DEADLOCK
+ return DB_LOCK_DEADLOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_DEFAULT"))
+#ifdef DB_LOCK_DEFAULT
+ return DB_LOCK_DEFAULT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_GET"))
+ return DB_LOCK_GET;
+ if (strEQ(name, "DB_LOCK_NORUN"))
+#ifdef DB_LOCK_NORUN
+ return DB_LOCK_NORUN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOTGRANTED"))
+#ifdef DB_LOCK_NOTGRANTED
+ return DB_LOCK_NOTGRANTED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOTHELD"))
+#ifdef DB_LOCK_NOTHELD
+ return DB_LOCK_NOTHELD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_NOWAIT"))
+#ifdef DB_LOCK_NOWAIT
+ return DB_LOCK_NOWAIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_OLDEST"))
+#ifdef DB_LOCK_OLDEST
+ return DB_LOCK_OLDEST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RANDOM"))
+#ifdef DB_LOCK_RANDOM
+ return DB_LOCK_RANDOM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RIW_N"))
+#ifdef DB_LOCK_RIW_N
+ return DB_LOCK_RIW_N;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_RW_N"))
+#ifdef DB_LOCK_RW_N
+ return DB_LOCK_RW_N;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOCK_YOUNGEST"))
+#ifdef DB_LOCK_YOUNGEST
+ return DB_LOCK_YOUNGEST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOGMAGIC"))
+#ifdef DB_LOGMAGIC
+ return DB_LOGMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_LOGOLDVER"))
+#ifdef DB_LOGOLDVER
+ return DB_LOGOLDVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MAX_PAGES"))
+#ifdef DB_MAX_PAGES
+ return DB_MAX_PAGES;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MAX_RECORDS"))
+#ifdef DB_MAX_RECORDS
+ return DB_MAX_RECORDS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_CLEAN"))
+#ifdef DB_MPOOL_CLEAN
+ return DB_MPOOL_CLEAN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_CREATE"))
+#ifdef DB_MPOOL_CREATE
+ return DB_MPOOL_CREATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_DIRTY"))
+#ifdef DB_MPOOL_DIRTY
+ return DB_MPOOL_DIRTY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_DISCARD"))
+#ifdef DB_MPOOL_DISCARD
+ return DB_MPOOL_DISCARD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_LAST"))
+#ifdef DB_MPOOL_LAST
+ return DB_MPOOL_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_NEW"))
+#ifdef DB_MPOOL_NEW
+ return DB_MPOOL_NEW;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MPOOL_PRIVATE"))
+#ifdef DB_MPOOL_PRIVATE
+ return DB_MPOOL_PRIVATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MUTEXDEBUG"))
+#ifdef DB_MUTEXDEBUG
+ return DB_MUTEXDEBUG;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_MUTEXLOCKS"))
+#ifdef DB_MUTEXLOCKS
+ return DB_MUTEXLOCKS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEEDSPLIT"))
+#ifdef DB_NEEDSPLIT
+ return DB_NEEDSPLIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEXT"))
+#ifdef DB_NEXT
+ return DB_NEXT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NEXT_DUP"))
+#ifdef DB_NEXT_DUP
+ return DB_NEXT_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOMMAP"))
+#ifdef DB_NOMMAP
+ return DB_NOMMAP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOOVERWRITE"))
+#ifdef DB_NOOVERWRITE
+ return DB_NOOVERWRITE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOSYNC"))
+#ifdef DB_NOSYNC
+ return DB_NOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_NOTFOUND"))
+#ifdef DB_NOTFOUND
+ return DB_NOTFOUND;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PAD"))
+#ifdef DB_PAD
+ return DB_PAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PAGEYIELD"))
+#ifdef DB_PAGEYIELD
+ return DB_PAGEYIELD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_POSITION"))
+#ifdef DB_POSITION
+ return DB_POSITION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PREV"))
+#ifdef DB_PREV
+ return DB_PREV;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_PRIVATE"))
+#ifdef DB_PRIVATE
+ return DB_PRIVATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_QUEUE"))
+ return DB_QUEUE;
+ if (strEQ(name, "DB_RDONLY"))
+#ifdef DB_RDONLY
+ return DB_RDONLY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECNO"))
+ return DB_RECNO;
+ if (strEQ(name, "DB_RECNUM"))
+#ifdef DB_RECNUM
+ return DB_RECNUM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECORDCOUNT"))
+#ifdef DB_RECORDCOUNT
+ return DB_RECORDCOUNT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECOVER"))
+#ifdef DB_RECOVER
+ return DB_RECOVER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RECOVER_FATAL"))
+#ifdef DB_RECOVER_FATAL
+ return DB_RECOVER_FATAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_REGISTERED"))
+#ifdef DB_REGISTERED
+ return DB_REGISTERED;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RENUMBER"))
+#ifdef DB_RENUMBER
+ return DB_RENUMBER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RMW"))
+#ifdef DB_RMW
+ return DB_RMW;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_RUNRECOVERY"))
+#ifdef DB_RUNRECOVERY
+ return DB_RUNRECOVERY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SEQUENTIAL"))
+#ifdef DB_SEQUENTIAL
+ return DB_SEQUENTIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET"))
+#ifdef DB_SET
+ return DB_SET;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET_RANGE"))
+#ifdef DB_SET_RANGE
+ return DB_SET_RANGE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SET_RECNO"))
+#ifdef DB_SET_RECNO
+ return DB_SET_RECNO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SNAPSHOT"))
+#ifdef DB_SNAPSHOT
+ return DB_SNAPSHOT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SWAPBYTES"))
+#ifdef DB_SWAPBYTES
+ return DB_SWAPBYTES;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TEMPORARY"))
+#ifdef DB_TEMPORARY
+ return DB_TEMPORARY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_THREAD"))
+#ifdef DB_THREAD
+ return DB_THREAD;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TRUNCATE"))
+#ifdef DB_TRUNCATE
+ return DB_TRUNCATE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXNMAGIC"))
+#ifdef DB_TXNMAGIC
+ return DB_TXNMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXNVERSION"))
+#ifdef DB_TXNVERSION
+ return DB_TXNVERSION;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_BACKWARD_ROLL"))
+ return DB_TXN_BACKWARD_ROLL;
+ if (strEQ(name, "DB_TXN_CKP"))
+#ifdef DB_TXN_CKP
+ return DB_TXN_CKP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_FORWARD_ROLL"))
+ return DB_TXN_FORWARD_ROLL;
+ if (strEQ(name, "DB_TXN_LOCK_2PL"))
+#ifdef DB_TXN_LOCK_2PL
+ return DB_TXN_LOCK_2PL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_MASK"))
+#ifdef DB_TXN_LOCK_MASK
+ return DB_TXN_LOCK_MASK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_OPTIMIST"))
+#ifdef DB_TXN_LOCK_OPTIMIST
+ return DB_TXN_LOCK_OPTIMIST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOCK_OPTIMISTIC"))
+#ifdef DB_TXN_LOCK_OPTIMISTIC
+ return DB_TXN_LOCK_OPTIMISTIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_MASK"))
+#ifdef DB_TXN_LOG_MASK
+ return DB_TXN_LOG_MASK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_REDO"))
+#ifdef DB_TXN_LOG_REDO
+ return DB_TXN_LOG_REDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_UNDO"))
+#ifdef DB_TXN_LOG_UNDO
+ return DB_TXN_LOG_UNDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_LOG_UNDOREDO"))
+#ifdef DB_TXN_LOG_UNDOREDO
+ return DB_TXN_LOG_UNDOREDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_NOSYNC"))
+#ifdef DB_TXN_NOSYNC
+ return DB_TXN_NOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_NOWAIT"))
+#ifdef DB_TXN_NOWAIT
+ return DB_TXN_NOWAIT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_OPENFILES"))
+ return DB_TXN_OPENFILES;
+ if (strEQ(name, "DB_TXN_REDO"))
+#ifdef DB_TXN_REDO
+ return DB_TXN_REDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_SYNC"))
+#ifdef DB_TXN_SYNC
+ return DB_TXN_SYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN_UNDO"))
+#ifdef DB_TXN_UNDO
+ return DB_TXN_UNDO;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_UNKNOWN"))
+ return DB_UNKNOWN;
+ if (strEQ(name, "DB_USE_ENVIRON"))
+#ifdef DB_USE_ENVIRON
+ return DB_USE_ENVIRON;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_USE_ENVIRON_ROOT"))
+#ifdef DB_USE_ENVIRON_ROOT
+ return DB_USE_ENVIRON_ROOT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_MAJOR"))
+#ifdef DB_VERSION_MAJOR
+ return DB_VERSION_MAJOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_MINOR"))
+#ifdef DB_VERSION_MINOR
+ return DB_VERSION_MINOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_VERSION_PATCH"))
+#ifdef DB_VERSION_PATCH
+ return DB_VERSION_PATCH;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_WRITECURSOR"))
+#ifdef DB_WRITECURSOR
+ return DB_WRITECURSOR;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'E':
+ break;
+ case 'F':
+ break;
+ case 'G':
+ break;
+ case 'H':
+ break;
+ case 'I':
+ break;
+ case 'J':
+ break;
+ case 'K':
+ break;
+ case 'L':
+ break;
+ case 'M':
+ break;
+ case 'N':
+ break;
+ case 'O':
+ break;
+ case 'P':
+ break;
+ case 'Q':
+ break;
+ case 'R':
+ break;
+ case 'S':
+ break;
+ case 'T':
+ break;
+ case 'U':
+ break;
+ case 'V':
+ break;
+ case 'W':
+ break;
+ case 'X':
+ break;
+ case 'Y':
+ break;
+ case 'Z':
+ break;
+ case 'a':
+ break;
+ case 'b':
+ break;
+ case 'c':
+ break;
+ case 'd':
+ break;
+ case 'e':
+ break;
+ case 'f':
+ break;
+ case 'g':
+ break;
+ case 'h':
+ break;
+ case 'i':
+ break;
+ case 'j':
+ break;
+ case 'k':
+ break;
+ case 'l':
+ break;
+ case 'm':
+ break;
+ case 'n':
+ break;
+ case 'o':
+ break;
+ case 'p':
+ break;
+ case 'q':
+ break;
+ case 'r':
+ break;
+ case 's':
+ break;
+ case 't':
+ break;
+ case 'u':
+ break;
+ case 'v':
+ break;
+ case 'w':
+ break;
+ case 'x':
+ break;
+ case 'y':
+ break;
+ case 'z':
+ break;
+ }
+ errno = EINVAL;
+ return 0;
+
+not_there:
+ errno = ENOENT;
+ return 0;
+}
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB PREFIX = env_
+
+char *
+DB_VERSION_STRING()
+ CODE:
+ RETVAL = DB_VERSION_STRING ;
+ OUTPUT:
+ RETVAL
+
+
+double
+constant(name,arg)
+ char * name
+ int arg
+
+#define env_db_version(maj, min, patch) db_version(&maj, &min, &patch)
+char *
+env_db_version(maj, min, patch)
+ int maj
+ int min
+ int patch
+ OUTPUT:
+ RETVAL
+ maj
+ min
+ patch
+
+int
+db_value_set(value, which)
+ int value
+ int which
+ NOT_IMPLEMENTED_YET
+
+
+DualType
+_db_remove(ref)
+ SV * ref
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("BerkeleyDB::db_remove needs Berkeley DB 3.x or better") ;
+#else
+ HV * hash ;
+ DB * dbp ;
+ SV * sv ;
+ const char * db = NULL ;
+ const char * subdb = NULL ;
+ BerkeleyDB__Env env = NULL ;
+ DB_ENV * dbenv = NULL ;
+ u_int32_t flags = 0 ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(db, "Filename", char *) ;
+ SetValue_pv(subdb, "Subname", char *) ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_ov(env, "Env", BerkeleyDB__Env) ;
+ if (env)
+ dbenv = env->Env ;
+ RETVAL = db_create(&dbp, dbenv, 0) ;
+ if (RETVAL == 0) {
+ RETVAL = dbp->remove(dbp, db, subdb, flags) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Env PACKAGE = BerkeleyDB::Env PREFIX = env_
+
+
+BerkeleyDB::Env::Raw
+_db_appinit(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ char * home = NULL ;
+ char * server = NULL ;
+ char ** config = NULL ;
+ int flags = 0 ;
+ int cachesize = 0 ;
+ int lk_detect = 0 ;
+ SV * errprefix = NULL;
+ DB_ENV * env ;
+ int status ;
+
+ Trace(("in _db_appinit [%s] %d\n", self, ref)) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(home, "Home", char *) ;
+ SetValue_pv(config, "Config", char **) ;
+ SetValue_sv(errprefix, "ErrPrefix") ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_pv(server, "Server", char *) ;
+ SetValue_iv(cachesize, "Cachesize") ;
+ SetValue_iv(lk_detect, "LockDetect") ;
+#ifndef AT_LEAST_DB_3_1
+ if (server)
+ softCrash("-Server needs Berkeley DB 3.1 or better") ;
+#endif /* ! AT_LEAST_DB_3_1 */
+ Trace(("_db_appinit(config=[%d], home=[%s],errprefix=[%s],flags=[%d]\n",
+ config, home, errprefix, flags)) ;
+#ifdef TRACE
+ if (config) {
+ int i ;
+ for (i = 0 ; i < 10 ; ++ i) {
+ if (config[i] == NULL) {
+ printf(" End\n") ;
+ break ;
+ }
+ printf(" config = [%s]\n", config[i]) ;
+ }
+ }
+#endif /* TRACE */
+ ZMALLOC(RETVAL, BerkeleyDB_ENV_type) ;
+ if (flags & DB_INIT_TXN)
+ RETVAL->txn_enabled = TRUE ;
+#if DB_VERSION_MAJOR == 2
+ ZMALLOC(RETVAL->Env, DB_ENV) ;
+ env = RETVAL->Env ;
+ {
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ RETVAL->Env->db_errpfx = SvPVX(RETVAL->ErrPrefix) ;
+
+ if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
+ env->db_errfile = GetFILEptr(sv);
+ RETVAL->ErrHandle = newRV(sv) ;
+ }
+ /* SetValue_io(RETVAL->Env.db_errfile, "ErrFile") ; */
+ SetValue_iv(env->db_verbose, "Verbose") ;
+ /* env->db_errbuf = RETVAL->ErrBuff ; */
+ env->db_errcall = db_errcall_cb ;
+ RETVAL->active = TRUE ;
+ status = db_appinit(home, config, env, flags) ;
+ Trace((" status = %d env %d Env %d\n", status, RETVAL, env)) ;
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+ else {
+ if (RETVAL->ErrHandle)
+ SvREFCNT_dec(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL->Env) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+ }
+#else /* DB_VERSION_MAJOR > 2 */
+#ifndef AT_LEAST_DB_3_1
+# define DB_CLIENT 0
+#endif
+ status = db_env_create(&RETVAL->Env, server ? DB_CLIENT : 0) ;
+ Trace(("db_env_create flags = %d returned %s\n", flags,
+ my_db_strerror(status))) ;
+ env = RETVAL->Env ;
+#ifdef AT_LEAST_DB_3_3
+ env->set_alloc(env, safemalloc, MyRealloc, safefree) ;
+#endif
+ if (status == 0 && cachesize) {
+ status = env->set_cachesize(env, 0, cachesize, 0) ;
+ Trace(("set_cachesize [%d] returned %s\n",
+ cachesize, my_db_strerror(status)));
+ }
+
+ if (status == 0 && lk_detect) {
+ status = env->set_lk_detect(env, lk_detect) ;
+ Trace(("set_lk_detect [%d] returned %s\n",
+ lk_detect, my_db_strerror(status)));
+ }
+#ifdef AT_LEAST_DB_3_1
+ /* set the server */
+ if (server && status == 0)
+ {
+ status = env->set_server(env, server, 0, 0, 0);
+ Trace(("ENV->set_server server = %s returned %s\n", server,
+ my_db_strerror(status))) ;
+ }
+#endif
+ if (status == 0)
+ {
+ int mode = 0 ;
+ /* Take a copy of the error prefix */
+ if (errprefix) {
+ Trace(("copying errprefix\n" )) ;
+ RETVAL->ErrPrefix = newSVsv(errprefix) ;
+ SvPOK_only(RETVAL->ErrPrefix) ;
+ }
+ if (RETVAL->ErrPrefix)
+ env->set_errpfx(env, SvPVX(RETVAL->ErrPrefix)) ;
+
+ if ((sv = readHash(hash, "ErrFile")) && sv != &PL_sv_undef) {
+ env->set_errfile(env, GetFILEptr(sv)) ;
+ RETVAL->ErrHandle = newRV(sv) ;
+ }
+ /* SetValue_iv(RETVAL->Env.db_verbose, "Verbose") ; */ /* TODO */
+ SetValue_iv(mode, "Mode") ;
+ /* RETVAL->Env.db_errbuf = RETVAL->ErrBuff ; */
+ env->set_errcall(env, db_errcall_cb) ;
+ RETVAL->active = TRUE ;
+#ifdef IS_DB_3_0_x
+ status = (env->open)(env, home, config, flags, mode) ;
+#else /* > 3.0 */
+ status = (env->open)(env, home, flags, mode) ;
+#endif
+ Trace(("ENV->open returned %s\n", my_db_strerror(status))) ;
+ }
+
+ if (status == 0)
+ hash_store_iv("BerkeleyDB::Term::Env", (char *)RETVAL, 1) ;
+ else {
+ (env->close)(env, 0) ;
+ if (RETVAL->ErrHandle)
+ SvREFCNT_dec(RETVAL->ErrHandle) ;
+ if (RETVAL->ErrPrefix)
+ SvREFCNT_dec(RETVAL->ErrPrefix) ;
+ Safefree(RETVAL) ;
+ RETVAL = NULL ;
+ }
+#endif /* DB_VERSION_MAJOR > 2 */
+ }
+ OUTPUT:
+ RETVAL
+
+BerkeleyDB::Txn::Raw
+_txn_begin(env, pid=NULL, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Env env
+ BerkeleyDB::Txn pid
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+ Trace(("txn_begin pid %d, flags %d\n", pid, flags)) ;
+#if DB_VERSION_MAJOR == 2
+ if (env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ if (pid)
+ p_id = pid->txn ;
+ env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(env->Env->tx_info, p_id, &txn) ;
+#else
+ txn_begin(env->Env, p_id, &txn, flags) ;
+#endif
+ if (env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+#if DB_VERSION_MAJOR == 2
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env->tx_info, k, m)
+#else /* DB 3.0 or better */
+# ifdef AT_LEAST_DB_3_1
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m, 0)
+# else
+# define env_txn_checkpoint(e,k,m) txn_checkpoint(e->Env, k, m)
+# endif
+#endif
+DualType
+env_txn_checkpoint(env, kbyte, min)
+ BerkeleyDB::Env env
+ long kbyte
+ long min
+
+HV *
+txn_stat(env)
+ BerkeleyDB::Env env
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ if(txn_stat(env->Env, &stat) == 0) {
+#else
+#if DB_VERSION_MAJOR == 2
+ if(txn_stat(env->Env->tx_info, &stat, safemalloc) == 0) {
+#else
+ if(txn_stat(env->Env, &stat, safemalloc) == 0) {
+#endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+#define EnDis(x) ((x) ? "Enabled" : "Disabled")
+void
+printEnv(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#if 0
+ printf("env [0x%X]\n", env) ;
+ printf(" ErrPrefix [%s]\n", env->ErrPrefix
+ ? SvPVX(env->ErrPrefix) : 0) ;
+ printf(" DB_ENV\n") ;
+ printf(" db_lorder [%d]\n", env->Env.db_lorder) ;
+ printf(" db_home [%s]\n", env->Env.db_home) ;
+ printf(" db_data_dir [%s]\n", env->Env.db_data_dir) ;
+ printf(" db_log_dir [%s]\n", env->Env.db_log_dir) ;
+ printf(" db_tmp_dir [%s]\n", env->Env.db_tmp_dir) ;
+ printf(" lk_info [%s]\n", EnDis(env->Env.lk_info)) ;
+ printf(" lk_max [%d]\n", env->Env.lk_max) ;
+ printf(" lg_info [%s]\n", EnDis(env->Env.lg_info)) ;
+ printf(" lg_max [%d]\n", env->Env.lg_max) ;
+ printf(" mp_info [%s]\n", EnDis(env->Env.mp_info)) ;
+ printf(" mp_size [%d]\n", env->Env.mp_size) ;
+ printf(" tx_info [%s]\n", EnDis(env->Env.tx_info)) ;
+ printf(" tx_max [%d]\n", env->Env.tx_max) ;
+ printf(" flags [%d]\n", env->Env.flags) ;
+ printf("\n") ;
+#endif
+
+SV *
+errPrefix(env, prefix)
+ BerkeleyDB::Env env
+ SV * prefix
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+ if (env->ErrPrefix) {
+ RETVAL = newSVsv(env->ErrPrefix) ;
+ SvPOK_only(RETVAL) ;
+ sv_setsv(env->ErrPrefix, prefix) ;
+ }
+ else {
+ RETVAL = NULL ;
+ env->ErrPrefix = newSVsv(prefix) ;
+ }
+ SvPOK_only(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ env->Env->db_errpfx = SvPVX(env->ErrPrefix) ;
+#else
+ env->Env->set_errpfx(env->Env, SvPVX(env->ErrPrefix)) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(env)
+ BerkeleyDB::Env env
+ CODE:
+ RETVAL = env->Status ;
+ OUTPUT:
+ RETVAL
+
+DualType
+db_appexit(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ CODE:
+#ifdef STRICT_CLOSE
+ if (env->open_dbs)
+ softCrash("attempted to close an environment with %d open database(s)",
+ env->open_dbs) ;
+#endif /* STRICT_CLOSE */
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db_appexit(env->Env) ;
+#else
+ RETVAL = (env->Env->close)(env->Env, 0) ;
+#endif
+ env->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(env)
+ BerkeleyDB::Env env
+ int RETVAL = 0 ;
+ CODE:
+ Trace(("In BerkeleyDB::Env::DESTROY\n"));
+ Trace((" env %ld Env %ld dirty %d\n", env, &env->Env, PL_dirty)) ;
+ if (env->active)
+#if DB_VERSION_MAJOR == 2
+ db_appexit(env->Env) ;
+#else
+ (env->Env->close)(env->Env, 0) ;
+#endif
+ if (env->ErrHandle)
+ SvREFCNT_dec(env->ErrHandle) ;
+ if (env->ErrPrefix)
+ SvREFCNT_dec(env->ErrPrefix) ;
+#if DB_VERSION_MAJOR == 2
+ Safefree(env->Env) ;
+#endif
+ Safefree(env) ;
+ hash_delete("BerkeleyDB::Term::Env", (char *)env) ;
+ Trace(("End of BerkeleyDB::Env::DESTROY %d\n", RETVAL)) ;
+
+BerkeleyDB::TxnMgr::Raw
+_TxnMgr(env)
+ BerkeleyDB::Env env
+ INIT:
+ ckActive_Environment(env->active) ;
+ if (!env->txn_enabled)
+ softCrash("Transaction Manager not enabled") ;
+ CODE:
+ ZMALLOC(RETVAL, BerkeleyDB_TxnMgr_type) ;
+ RETVAL->env = env ;
+ /* hash_store_iv("BerkeleyDB::Term::TxnMgr", (char *)txn, 1) ; */
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_lg_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_bsize(env, bsize)
+ BerkeleyDB::Env env
+ u_int32_t bsize
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_bsize needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_bsize(env->Env, bsize);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_lg_max(env, lg_max)
+ BerkeleyDB::Env env
+ u_int32_t lg_max
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+ softCrash("$env->set_lg_max needs Berkeley DB 3.0.55 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_lg_max(env->Env, lg_max);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_data_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_data_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_data_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_tmp_dir(env, dir)
+ BerkeleyDB::Env env
+ char * dir
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("$env->set_tmp_dir needs Berkeley DB 3.1 or better") ;
+#else
+ RETVAL = env->Status = env->Env->set_tmp_dir(env->Env, dir);
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+set_mutexlocks(env, do_lock)
+ BerkeleyDB::Env env
+ int do_lock
+ INIT:
+ ckActive_Database(env->active) ;
+ CODE:
+#ifndef AT_LEAST_DB_3
+	    softCrash("$env->set_mutexlocks needs Berkeley DB 3.0 or better") ;
+#else
+#if defined(AT_LEAST_DB_3_2_6) || defined(IS_DB_3_0_x)
+ RETVAL = env->Status = env->Env->set_mutexlocks(env->Env, do_lock);
+#else /* DB 3.1 or 3.2.3 */
+ RETVAL = env->Status = db_env_set_mutexlocks(do_lock);
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+MODULE = BerkeleyDB::Term PACKAGE = BerkeleyDB::Term
+
+void
+close_everything()
+
+#define safeCroak(string) softCrash(string)
+void
+safeCroak(string)
+ char * string
+
+MODULE = BerkeleyDB::Hash PACKAGE = BerkeleyDB::Hash PREFIX = hash_
+
+BerkeleyDB::Hash::Raw
+_db_open_hash(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ Trace(("_db_open_hash start\n")) ;
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Hash")) && sv != &PL_sv_undef) {
+ info.h_hash = hash_cb ;
+ db->hash = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ croak("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_HASH, flags, mode, &info) ;
+ Trace(("_db_open_hash end\n")) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Hash needs Berkeley DB 3.x or better") ;
+#else
+ DB_HASH_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "hash_magic", stat->hash_magic) ;
+ hv_store_iv(RETVAL, "hash_version", stat->hash_version);
+ hv_store_iv(RETVAL, "hash_pagesize", stat->hash_pagesize);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "hash_nkeys", stat->hash_nkeys);
+ hv_store_iv(RETVAL, "hash_ndata", stat->hash_ndata);
+#else
+ hv_store_iv(RETVAL, "hash_nrecs", stat->hash_nrecs);
+#endif
+ hv_store_iv(RETVAL, "hash_nelem", stat->hash_nelem);
+ hv_store_iv(RETVAL, "hash_ffactor", stat->hash_ffactor);
+ hv_store_iv(RETVAL, "hash_buckets", stat->hash_buckets);
+ hv_store_iv(RETVAL, "hash_free", stat->hash_free);
+ hv_store_iv(RETVAL, "hash_bfree", stat->hash_bfree);
+ hv_store_iv(RETVAL, "hash_bigpages", stat->hash_bigpages);
+ hv_store_iv(RETVAL, "hash_big_bfree", stat->hash_big_bfree);
+ hv_store_iv(RETVAL, "hash_overflows", stat->hash_overflows);
+ hv_store_iv(RETVAL, "hash_ovfl_free", stat->hash_ovfl_free);
+ hv_store_iv(RETVAL, "hash_dup", stat->hash_dup);
+ hv_store_iv(RETVAL, "hash_dup_free", stat->hash_dup_free);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "hash_metaflags", stat->hash_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Unknown PACKAGE = BerkeleyDB::Unknown PREFIX = hash_
+
+void
+_db_open_unknown(ref)
+ SV * ref
+ PPCODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+ BerkeleyDB RETVAL ;
+ static char * Names[] = {"", "Btree", "Hash", "Recno"} ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char *) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.h_ffactor, "Ffactor") ;
+ SetValue_iv(info.h_nelem, "Nelem") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_UNKNOWN, flags, mode, &info) ;
+ XPUSHs(sv_2mortal(newSViv(PTR2IV(RETVAL))));
+ if (RETVAL)
+ XPUSHs(sv_2mortal(newSVpv(Names[RETVAL->type], 0))) ;
+ else
+ XPUSHs(sv_2mortal(newSViv((IV)NULL)));
+ }
+
+
+
+MODULE = BerkeleyDB::Btree PACKAGE = BerkeleyDB::Btree PREFIX = btree_
+
+BerkeleyDB::Btree::Raw
+_db_open_btree(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Filename", char*) ;
+ SetValue_pv(subname, "Subname", char *) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.flags, "Property") ;
+ ZMALLOC(db, BerkeleyDB_type) ;
+ if ((sv = readHash(hash, "Compare")) && sv != &PL_sv_undef) {
+ info.bt_compare = btree_compare ;
+ db->compare = newSVsv(sv) ;
+ }
+ /* DB_DUPSORT was introduced in DB 2.5.9 */
+ if ((sv = readHash(hash, "DupCompare")) && sv != &PL_sv_undef) {
+#ifdef DB_DUPSORT
+ info.dup_compare = dup_compare ;
+ db->dup_compare = newSVsv(sv) ;
+ info.flags |= DB_DUP|DB_DUPSORT ;
+#else
+ softCrash("DupCompare needs Berkeley DB 2.5.9 or later") ;
+#endif
+ }
+ if ((sv = readHash(hash, "Prefix")) && sv != &PL_sv_undef) {
+ info.bt_prefix = btree_prefix ;
+ db->prefix = newSVsv(sv) ;
+ }
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_BTREE, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DB_BTREE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "bt_magic", stat->bt_magic);
+ hv_store_iv(RETVAL, "bt_version", stat->bt_version);
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "bt_metaflags", stat->bt_metaflags) ;
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_metaflags) ;
+#else
+ hv_store_iv(RETVAL, "bt_flags", stat->bt_flags) ;
+#endif
+ hv_store_iv(RETVAL, "bt_maxkey", stat->bt_maxkey) ;
+ hv_store_iv(RETVAL, "bt_minkey", stat->bt_minkey);
+ hv_store_iv(RETVAL, "bt_re_len", stat->bt_re_len);
+ hv_store_iv(RETVAL, "bt_re_pad", stat->bt_re_pad);
+ hv_store_iv(RETVAL, "bt_pagesize", stat->bt_pagesize);
+ hv_store_iv(RETVAL, "bt_levels", stat->bt_levels);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "bt_nkeys", stat->bt_nkeys);
+ hv_store_iv(RETVAL, "bt_ndata", stat->bt_ndata);
+#else
+ hv_store_iv(RETVAL, "bt_nrecs", stat->bt_nrecs);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pg", stat->bt_int_pg);
+ hv_store_iv(RETVAL, "bt_leaf_pg", stat->bt_leaf_pg);
+ hv_store_iv(RETVAL, "bt_dup_pg", stat->bt_dup_pg);
+ hv_store_iv(RETVAL, "bt_over_pg", stat->bt_over_pg);
+ hv_store_iv(RETVAL, "bt_free", stat->bt_free);
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ hv_store_iv(RETVAL, "bt_freed", stat->bt_freed);
+ hv_store_iv(RETVAL, "bt_pfxsaved", stat->bt_pfxsaved);
+ hv_store_iv(RETVAL, "bt_split", stat->bt_split);
+ hv_store_iv(RETVAL, "bt_rootsplit", stat->bt_rootsplit);
+ hv_store_iv(RETVAL, "bt_fastsplit", stat->bt_fastsplit);
+ hv_store_iv(RETVAL, "bt_added", stat->bt_added);
+ hv_store_iv(RETVAL, "bt_deleted", stat->bt_deleted);
+ hv_store_iv(RETVAL, "bt_get", stat->bt_get);
+ hv_store_iv(RETVAL, "bt_cache_hit", stat->bt_cache_hit);
+ hv_store_iv(RETVAL, "bt_cache_miss", stat->bt_cache_miss);
+#endif
+ hv_store_iv(RETVAL, "bt_int_pgfree", stat->bt_int_pgfree);
+ hv_store_iv(RETVAL, "bt_leaf_pgfree", stat->bt_leaf_pgfree);
+ hv_store_iv(RETVAL, "bt_dup_pgfree", stat->bt_dup_pgfree);
+ hv_store_iv(RETVAL, "bt_over_pgfree", stat->bt_over_pgfree);
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Recno PACKAGE = BerkeleyDB::Recno PREFIX = recno_
+
+BerkeleyDB::Recno::Raw
+_db_open_recno(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+
+ SetValue_iv(info.flags, "Property") ;
+ SetValue_pv(info.re_source, "Source", char*) ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+		info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_FIXEDLEN) ;
+ }
+ if ((sv = readHash(hash, "Delim")) && sv != &PL_sv_undef) {
+		info.re_delim = SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_DELIMITER) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+		info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_RECNO, flags, mode, &info) ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Queue PACKAGE = BerkeleyDB::Queue PREFIX = recno_
+
+BerkeleyDB::Queue::Raw
+_db_open_queue(self, ref)
+ char * self
+ SV * ref
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("BerkeleyDB::Queue needs Berkeley DB 3.0.x or better");
+#else
+ HV * hash ;
+ SV * sv ;
+ DB_INFO info ;
+ BerkeleyDB__Env dbenv = NULL;
+ SV * ref_dbenv = NULL;
+ const char * file = NULL ;
+ const char * subname = NULL ;
+ int flags = 0 ;
+ int mode = 0 ;
+ BerkeleyDB db ;
+
+ hash = (HV*) SvRV(ref) ;
+ SetValue_pv(file, "Fname", char*) ;
+ SetValue_ov(dbenv, "Env", BerkeleyDB__Env) ;
+ ref_dbenv = sv ;
+ SetValue_iv(flags, "Flags") ;
+ SetValue_iv(mode, "Mode") ;
+
+ Zero(&info, 1, DB_INFO) ;
+ SetValue_iv(info.db_cachesize, "Cachesize") ;
+ SetValue_iv(info.db_lorder, "Lorder") ;
+ SetValue_iv(info.db_pagesize, "Pagesize") ;
+ SetValue_iv(info.bt_minkey, "Minkey") ;
+ SetValue_iv(info.q_extentsize, "ExtentSize") ;
+
+
+ SetValue_iv(info.flags, "Property") ;
+ if ((sv = readHash(hash, "Len")) && sv != &PL_sv_undef) {
+		info.re_len = SvIV(sv) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ if ((sv = readHash(hash, "Pad")) && sv != &PL_sv_undef) {
+		info.re_pad = (u_int32_t)(SvPOK(sv) ? *SvPV(sv,PL_na) : SvIV(sv)) ;
+ flagSet_DB2(info.flags, DB_PAD) ;
+ }
+ ZMALLOC(db, BerkeleyDB_type) ;
+#ifdef ALLOW_RECNO_OFFSET
+ SetValue_iv(db->array_base, "ArrayBase") ;
+ db->array_base = (db->array_base == 0 ? 1 : 0) ;
+#endif /* ALLOW_RECNO_OFFSET */
+
+ RETVAL = my_db_open(db, ref, ref_dbenv, dbenv, file, subname, DB_QUEUE, flags, mode, &info) ;
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+HV *
+db_stat(db, flags=0)
+ int flags
+ BerkeleyDB::Common db
+ HV * RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2
+ softCrash("$db->db_stat for a Queue needs Berkeley DB 3.x or better") ;
+#else /* Berkeley DB 3, or better */
+ DB_QUEUE_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, flags) ;
+#else
+ db->Status = ((db->dbp)->stat)(db->dbp, &stat, safemalloc, flags) ;
+#endif
+ if (db->Status == 0) {
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "qs_magic", stat->qs_magic) ;
+ hv_store_iv(RETVAL, "qs_version", stat->qs_version);
+#ifdef AT_LEAST_DB_3_1
+ hv_store_iv(RETVAL, "qs_nkeys", stat->qs_nkeys);
+ hv_store_iv(RETVAL, "qs_ndata", stat->qs_ndata);
+#else
+ hv_store_iv(RETVAL, "qs_nrecs", stat->qs_nrecs);
+#endif
+ hv_store_iv(RETVAL, "qs_pages", stat->qs_pages);
+ hv_store_iv(RETVAL, "qs_pagesize", stat->qs_pagesize);
+ hv_store_iv(RETVAL, "qs_pgfree", stat->qs_pgfree);
+ hv_store_iv(RETVAL, "qs_re_len", stat->qs_re_len);
+ hv_store_iv(RETVAL, "qs_re_pad", stat->qs_re_pad);
+#ifdef AT_LEAST_DB_3_2
+#else
+ hv_store_iv(RETVAL, "qs_start", stat->qs_start);
+#endif
+ hv_store_iv(RETVAL, "qs_first_recno", stat->qs_first_recno);
+ hv_store_iv(RETVAL, "qs_cur_recno", stat->qs_cur_recno);
+#if DB_VERSION_MAJOR >= 3
+ hv_store_iv(RETVAL, "qs_metaflags", stat->qs_metaflags);
+#endif
+ safefree(stat) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB::Common PACKAGE = BerkeleyDB::Common PREFIX = dab_
+
+
+DualType
+db_close(db,flags=0)
+ int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ CODE:
+ Trace(("BerkeleyDB::Common::db_close %d\n", db));
+#ifdef STRICT_CLOSE
+ if (db->txn)
+ softCrash("attempted to close a database while a transaction was still open") ;
+ if (db->open_cursors)
+ softCrash("attempted to close a database with %d open cursor(s)",
+ db->open_cursors) ;
+#endif /* STRICT_CLOSE */
+ RETVAL = db->Status = ((db->dbp)->close)(db->dbp, flags) ;
+ if (db->parent_env && db->parent_env->open_dbs)
+ -- db->parent_env->open_dbs ;
+ db->active = FALSE ;
+ hash_delete("BerkeleyDB::Term::Db", (char *)db) ;
+ -- db->open_cursors ;
+ Trace(("end of BerkeleyDB::Common::db_close\n"));
+ OUTPUT:
+ RETVAL
+
+void
+dab__DESTROY(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ Trace(("In BerkeleyDB::Common::_DESTROY db %d dirty=%d\n", db, PL_dirty)) ;
+ destroyDB(db) ;
+ Trace(("End of BerkeleyDB::Common::DESTROY \n")) ;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur)
+#else
+#define db_cursor(db, txn, cur,flags) ((db->dbp)->cursor)(db->dbp, txn, cur,flags)
+#endif
+BerkeleyDB::Cursor::Raw
+_db_cursor(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+ DBC * cursor ;
+ CurrentDB = db ;
+ if ((db->Status = db_cursor(db, db->txn, &cursor, flags)) == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+BerkeleyDB::Cursor::Raw
+_db_join(db, cursors, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ AV * cursors
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#if DB_VERSION_MAJOR == 2 && (DB_VERSION_MINOR < 5 || (DB_VERSION_MINOR == 5 && DB_VERSION_PATCH < 2))
+ softCrash("join needs Berkeley DB 2.5.2 or later") ;
+#else /* Berkeley DB >= 2.5.2 */
+ DBC * join_cursor ;
+ DBC ** cursor_list ;
+ I32 count = av_len(cursors) + 1 ;
+ int i ;
+ CurrentDB = db ;
+ if (count < 1 )
+ softCrash("db_join: No cursors in parameter list") ;
+ cursor_list = (DBC **)safemalloc(sizeof(DBC*) * (count + 1));
+ for (i = 0 ; i < count ; ++i) {
+ SV * obj = (SV*) * av_fetch(cursors, i, FALSE) ;
+ IV tmp = SvIV(getInnerObject(obj)) ;
+ BerkeleyDB__Cursor cur = INT2PTR(BerkeleyDB__Cursor, tmp);
+ cursor_list[i] = cur->cursor ;
+ }
+ cursor_list[i] = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, flags, &join_cursor)) == 0){
+#else
+ if ((db->Status = ((db->dbp)->join)(db->dbp, cursor_list, &join_cursor, flags)) == 0){
+#endif
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->open_cursors ++ ;
+ RETVAL->parent_db = db ;
+ RETVAL->cursor = join_cursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+ safefree(cursor_list) ;
+#endif /* Berkeley DB >= 2.5.2 */
+ }
+ OUTPUT:
+ RETVAL
+
+int
+ArrayOffset(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL = db->array_base ? 0 : 1 ;
+#else
+ RETVAL = 0 ;
+#endif /* ALLOW_RECNO_OFFSET */
+ OUTPUT:
+ RETVAL
+
+int
+type(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ RETVAL = db->type ;
+ OUTPUT:
+ RETVAL
+
+int
+byteswapped(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+ softCrash("byteswapped needs Berkeley DB 2.5 or later") ;
+#else
+#if DB_VERSION_MAJOR == 2
+ RETVAL = db->dbp->byteswapped ;
+#else
+#ifdef AT_LEAST_DB_3_3
+ db->dbp->get_byteswapped(db->dbp, &RETVAL) ;
+#else
+ RETVAL = db->dbp->get_byteswapped(db->dbp) ;
+#endif
+#endif
+#endif
+ OUTPUT:
+ RETVAL
+
+DualType
+status(db)
+ BerkeleyDB::Common db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+#ifdef DBM_FILTERING
+
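+/*
+ * setFilter(ftype) is the shared body of the four DBM filter accessors
+ * below: it returns any existing filter as a mortal copy, removes the
+ * filter when the new value is undef, and otherwise stores (or updates)
+ * the supplied code reference in the database handle.
+ */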
+#define setFilter(ftype) \
+ { \
+ if (db->ftype) \
+ RETVAL = sv_mortalcopy(db->ftype) ; \
+ ST(0) = RETVAL ; \
+ if (db->ftype && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->ftype) ; \
+ db->ftype = NULL ; \
+ } \
+ else if (code) { \
+ if (db->ftype) \
+ sv_setsv(db->ftype, code) ; \
+ else \
+ db->ftype = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_key) ;
+
+SV *
+filter_store_key(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_key) ;
+
+SV *
+filter_fetch_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_value) ;
+
+SV *
+filter_store_value(db, code)
+ BerkeleyDB::Common db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_value) ;
+
+#endif /* DBM_FILTERING */
+
+void
+partial_set(db, offset, length)
+ BerkeleyDB::Common db
+ u_int32_t offset
+ u_int32_t length
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial = DB_DBT_PARTIAL ;
+ db->doff = offset ;
+ db->dlen = length ;
+
+
+void
+partial_clear(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ PPCODE:
+ if (GIMME == G_ARRAY) {
+ XPUSHs(sv_2mortal(newSViv(db->partial == DB_DBT_PARTIAL))) ;
+ XPUSHs(sv_2mortal(newSViv(db->doff))) ;
+ XPUSHs(sv_2mortal(newSViv(db->dlen))) ;
+ }
+ db->partial =
+ db->doff =
+ db->dlen = 0 ;
+
+
+#define db_del(db, key, flags) \
+ (db->Status = ((db->dbp)->del)(db->dbp, db->txn, &key, flags))
+DualType
+db_del(db, key, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+
+#define db_get(db, key, data, flags) \
+ (db->Status = ((db->dbp)->get)(db->dbp, db->txn, &key, &data, flags))
+DualType
+db_get(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ DBT_OPT data
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ SetPartial(data,db) ;
+ OUTPUT:
+ key if (flagSet(DB_SET_RECNO)) OutputValue(ST(1), key) ;
+ data
+
+#define db_put(db,key,data,flag) \
+ (db->Status = (db->dbp->put)(db->dbp,db->txn,&key,&data,flag))
+DualType
+db_put(db, key, data, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ DBTKEY key
+ DBT data
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ key if (flagSet(DB_APPEND)) OutputKey(ST(1), key) ;
+
+#define db_key_range(db, key, range, flags) \
+ (db->Status = ((db->dbp)->key_range)(db->dbp, db->txn, &key, &range, flags))
+DualType
+db_key_range(db, key, less, equal, greater, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Common db
+ DBTKEY_B key
+ double less = 0.0 ;
+ double equal = 0.0 ;
+ double greater = 0.0 ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3_1
+ softCrash("key_range needs Berkeley DB 3.1.x or later") ;
+#else
+ DB_KEY_RANGE range ;
+ range.less = range.equal = range.greater = 0.0 ;
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+ RETVAL = db_key_range(db, key, range, flags);
+ if (RETVAL == 0) {
+ less = range.less ;
+ equal = range.equal;
+ greater = range.greater;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+ less
+ equal
+ greater
+
+
+#define db_fd(d, x) (db->Status = (db->dbp->fd)(db->dbp, &x))
+DualType
+db_fd(db)
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ CurrentDB = db ;
+ db_fd(db, RETVAL) ;
+ OUTPUT:
+ RETVAL
+
+
+#define db_sync(db, fl) (db->Status = (db->dbp->sync)(db->dbp, fl))
+DualType
+db_sync(db, flags=0)
+ u_int flags
+ BerkeleyDB::Common db
+ INIT:
+ ckActive_Database(db->active) ;
+ CurrentDB = db ;
+
+void
+_Txn(db, txn=NULL)
+ BerkeleyDB::Common db
+ BerkeleyDB::Txn txn
+ INIT:
+ ckActive_Database(db->active) ;
+ CODE:
+ if (txn) {
+ Trace(("_Txn(%d in %d) active [%d]\n", txn->txn, txn, txn->active));
+ ckActive_Transaction(txn->active) ;
+ db->txn = txn->txn ;
+ }
+ else {
+ Trace(("_Txn(undef) \n"));
+ db->txn = NULL ;
+ }
+
+
+
+
+MODULE = BerkeleyDB::Cursor PACKAGE = BerkeleyDB::Cursor PREFIX = cu_
+
+BerkeleyDB::Cursor::Raw
+_c_dup(db, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Cursor db
+ BerkeleyDB::Cursor RETVAL = NULL ;
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Database(db->active) ;
+ CODE:
+ {
+#ifndef AT_LEAST_DB_3
+ softCrash("c_dup needs at least Berkeley DB 3.0.x");
+#else
+ DBC * newcursor ;
+ db->Status = ((db->cursor)->c_dup)(db->cursor, &newcursor, flags) ;
+ if (db->Status == 0){
+ ZMALLOC(RETVAL, BerkeleyDB__Cursor_type) ;
+ db->parent_db->open_cursors ++ ;
+ RETVAL->parent_db = db->parent_db ;
+ RETVAL->cursor = newcursor ;
+ RETVAL->dbp = db->dbp ;
+ RETVAL->type = db->type ;
+ RETVAL->recno_or_queue = db->recno_or_queue ;
+ RETVAL->filename = my_strdup(db->filename) ;
+ RETVAL->compare = db->compare ;
+ RETVAL->dup_compare = db->dup_compare ;
+ RETVAL->prefix = db->prefix ;
+ RETVAL->hash = db->hash ;
+ RETVAL->partial = db->partial ;
+ RETVAL->doff = db->doff ;
+ RETVAL->dlen = db->dlen ;
+ RETVAL->active = TRUE ;
+#ifdef ALLOW_RECNO_OFFSET
+ RETVAL->array_base = db->array_base ;
+#endif /* ALLOW_RECNO_OFFSET */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = FALSE ;
+ RETVAL->filter_fetch_key = db->filter_fetch_key ;
+ RETVAL->filter_store_key = db->filter_store_key ;
+ RETVAL->filter_fetch_value = db->filter_fetch_value ;
+ RETVAL->filter_store_value = db->filter_store_value ;
+#endif /* DBM_FILTERING */
+ /* RETVAL->info ; */
+ hash_store_iv("BerkeleyDB::Term::Cursor", (char *)RETVAL, 1) ;
+ }
+#endif
+ }
+ OUTPUT:
+ RETVAL
+
+DualType
+_c_close(db)
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ CODE:
+ RETVAL = db->Status =
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->active = FALSE ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ OUTPUT:
+ RETVAL
+
+void
+_DESTROY(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ CurrentDB = db->parent_db ;
+ Trace(("In BerkeleyDB::Cursor::_DESTROY db %d dirty=%d active=%d\n", db, PL_dirty, db->active));
+ hash_delete("BerkeleyDB::Term::Cursor", (char *)db) ;
+ if (db->active)
+ ((db->cursor)->c_close)(db->cursor) ;
+ if (db->parent_db->open_cursors)
+ -- db->parent_db->open_cursors ;
+ Safefree(db->filename) ;
+ Safefree(db) ;
+ Trace(("End of BerkeleyDB::Cursor::_DESTROY\n")) ;
+
+DualType
+status(db)
+ BerkeleyDB::Cursor db
+ CODE:
+ RETVAL = db->Status ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_del(c,f) (c->Status = ((c->cursor)->c_del)(c->cursor,f))
+DualType
+cu_c_del(db, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ OUTPUT:
+ RETVAL
+
+
+#define cu_c_get(c,k,d,f) (c->Status = (c->cursor->c_get)(c->cursor,&k,&d,f))
+DualType
+cu_c_get(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY_B key
+ DBT_B data
+ INIT:
+ Trace(("c_get db [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ SetPartial(data,db) ;
+ Trace(("c_get end\n")) ;
+ OUTPUT:
+ RETVAL
+ key
+ data if (! flagSet(DB_JOIN_ITEM)) OutputValue_B(ST(2), data) ;
+
+
+#define cu_c_put(c,k,d,f) (c->Status = (c->cursor->c_put)(c->cursor,&k,&d,f))
+DualType
+cu_c_put(db, key, data, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ DBTKEY key
+ DBT data
+ INIT:
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ /* SetPartial(data,db) ; */
+ OUTPUT:
+ RETVAL
+
+#define cu_c_count(c,p,f) (c->Status = (c->cursor->c_count)(c->cursor,&p,f))
+DualType
+cu_c_count(db, count, flags=0)
+ int flags
+ BerkeleyDB::Cursor db
+ u_int32_t count = NO_INIT
+ CODE:
+#ifndef AT_LEAST_DB_3_1
+ softCrash("c_count needs at least Berkeley DB 3.1.x");
+#else
+ Trace(("c_get count [%d] flags [%d]\n", db, flags)) ;
+ CurrentDB = db->parent_db ;
+ ckActive_Cursor(db->active) ;
+ RETVAL = cu_c_count(db, count, flags) ;
+ Trace((" c_count got %d duplicates\n", count)) ;
+#endif
+ OUTPUT:
+ RETVAL
+ count
+
+MODULE = BerkeleyDB::TxnMgr PACKAGE = BerkeleyDB::TxnMgr PREFIX = xx_
+
+BerkeleyDB::Txn::Raw
+_txn_begin(txnmgr, pid=NULL, flags=0)
+ u_int32_t flags
+ BerkeleyDB::TxnMgr txnmgr
+ BerkeleyDB::Txn pid
+ CODE:
+ {
+ DB_TXN *txn ;
+ DB_TXN *p_id = NULL ;
+#if DB_VERSION_MAJOR == 2
+ if (txnmgr->env->Env->tx_info == NULL)
+ softCrash("Transaction Manager not enabled") ;
+#endif
+ if (pid)
+ p_id = pid->txn ;
+ txnmgr->env->TxnMgrStatus =
+#if DB_VERSION_MAJOR == 2
+ txn_begin(txnmgr->env->Env->tx_info, p_id, &txn) ;
+#else
+ txn_begin(txnmgr->env->Env, p_id, &txn, flags) ;
+#endif
+ if (txnmgr->env->TxnMgrStatus == 0) {
+ ZMALLOC(RETVAL, BerkeleyDB_Txn_type) ;
+ RETVAL->txn = txn ;
+ RETVAL->active = TRUE ;
+ Trace(("_txn_begin created txn [%d] in [%d]\n", txn, RETVAL));
+ hash_store_iv("BerkeleyDB::Term::Txn", (char *)RETVAL, 1) ;
+ }
+ else
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+
+DualType
+status(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ RETVAL = mgr->env->TxnMgrStatus ;
+ OUTPUT:
+ RETVAL
+
+
+void
+_DESTROY(mgr)
+ BerkeleyDB::TxnMgr mgr
+ CODE:
+ Trace(("In BerkeleyDB::TxnMgr::DESTROY dirty=%d\n", PL_dirty)) ;
+ Safefree(mgr) ;
+ Trace(("End of BerkeleyDB::TxnMgr::DESTROY\n")) ;
+
+DualType
+txn_close(txnp)
+ BerkeleyDB::TxnMgr txnp
+ NOT_IMPLEMENTED_YET
+
+
+#if DB_VERSION_MAJOR == 2
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env->tx_info, k, m)
+#else
+# ifdef AT_LEAST_DB_3_1
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m, 0)
+# else
+# define xx_txn_checkpoint(t,k,m) txn_checkpoint(t->env->Env, k, m)
+# endif
+#endif
+DualType
+xx_txn_checkpoint(txnp, kbyte, min)
+ BerkeleyDB::TxnMgr txnp
+ long kbyte
+ long min
+
+HV *
+txn_stat(txnp)
+ BerkeleyDB::TxnMgr txnp
+ HV * RETVAL = NULL ;
+ CODE:
+ {
+ DB_TXN_STAT * stat ;
+#ifdef AT_LEAST_DB_3_3
+ if(txn_stat(txnp->env->Env, &stat) == 0) {
+#else
+#if DB_VERSION_MAJOR == 2
+ if(txn_stat(txnp->env->Env->tx_info, &stat, safemalloc) == 0) {
+#else
+ if(txn_stat(txnp->env->Env, &stat, safemalloc) == 0) {
+#endif
+#endif
+ RETVAL = (HV*)sv_2mortal((SV*)newHV()) ;
+ hv_store_iv(RETVAL, "st_time_ckp", stat->st_time_ckp) ;
+ hv_store_iv(RETVAL, "st_last_txnid", stat->st_last_txnid) ;
+ hv_store_iv(RETVAL, "st_maxtxns", stat->st_maxtxns) ;
+ hv_store_iv(RETVAL, "st_naborts", stat->st_naborts) ;
+ hv_store_iv(RETVAL, "st_nbegins", stat->st_nbegins) ;
+ hv_store_iv(RETVAL, "st_ncommits", stat->st_ncommits) ;
+ hv_store_iv(RETVAL, "st_nactive", stat->st_nactive) ;
+#if DB_VERSION_MAJOR > 2
+ hv_store_iv(RETVAL, "st_maxnactive", stat->st_maxnactive) ;
+ hv_store_iv(RETVAL, "st_regsize", stat->st_regsize) ;
+ hv_store_iv(RETVAL, "st_region_wait", stat->st_region_wait) ;
+ hv_store_iv(RETVAL, "st_region_nowait", stat->st_region_nowait) ;
+#endif
+ safefree(stat) ;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+
+BerkeleyDB::TxnMgr
+txn_open(dir, flags, mode, dbenv)
+ int flags
+ const char * dir
+ int mode
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+
+MODULE = BerkeleyDB::Txn PACKAGE = BerkeleyDB::Txn PREFIX = xx_
+
+DualType
+status(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ RETVAL = tid->Status ;
+ OUTPUT:
+ RETVAL
+
+int
+_DESTROY(tid)
+ BerkeleyDB::Txn tid
+ CODE:
+ Trace(("In BerkeleyDB::Txn::_DESTROY txn [%d] active [%d] dirty=%d\n", tid->txn, tid->active, PL_dirty)) ;
+ if (tid->active)
+ txn_abort(tid->txn) ;
+ RETVAL = (int)tid ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ Safefree(tid) ;
+ Trace(("End of BerkeleyDB::Txn::DESTROY\n")) ;
+ OUTPUT:
+ RETVAL
+
+#define xx_txn_unlink(d,f,e) txn_unlink(d,f,&(e->Env))
+DualType
+xx_txn_unlink(dir, force, dbenv)
+ const char * dir
+ int force
+ BerkeleyDB::Env dbenv
+ NOT_IMPLEMENTED_YET
+
+#ifdef AT_LEAST_DB_3_3
+#define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn, 0))
+#else
+#define xx_txn_prepare(t) (t->Status = txn_prepare(t->txn))
+#endif
+DualType
+xx_txn_prepare(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+
+#if DB_VERSION_MAJOR == 2
+# define _txn_commit(t,flags) (t->Status = txn_commit(t->txn))
+#else
+# define _txn_commit(t, flags) (t->Status = txn_commit(t->txn, flags))
+#endif
+DualType
+_txn_commit(tid, flags=0)
+ u_int32_t flags
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#define _txn_abort(t) (t->Status = txn_abort(t->txn))
+DualType
+_txn_abort(tid)
+ BerkeleyDB::Txn tid
+ INIT:
+ ckActive_Transaction(tid->active) ;
+ hash_delete("BerkeleyDB::Term::Txn", (char *)tid) ;
+ tid->active = FALSE ;
+
+#define xx_txn_id(t) txn_id(t->txn)
+u_int32_t
+xx_txn_id(tid)
+ BerkeleyDB::Txn tid
+
+MODULE = BerkeleyDB::_tiedHash PACKAGE = BerkeleyDB::_tiedHash
+
+int
+FIRSTKEY(db)
+ BerkeleyDB::Common db
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DBC * cursor ;
+
+ /*
+ TODO!
+ set partial value to 0 - to eliminate the retrieval of
+ the value need to store any existing partial settings &
+ restore at the end.
+
+ */
+ CurrentDB = db ;
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ /* If necessary create a cursor for FIRSTKEY/NEXTKEY use */
+ if (!db->cursor &&
+ (db->Status = db_cursor(db, db->txn, &cursor, 0)) == 0 )
+ db->cursor = cursor ;
+
+ if (db->cursor)
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_FIRST);
+ else
+ RETVAL = db->Status ;
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+
+
+int
+NEXTKEY(db, key)
+ BerkeleyDB::Common db
+ DBTKEY key
+ CODE:
+ {
+ DBT value ;
+
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ key.flags = 0 ;
+ RETVAL = (db->Status) =
+ ((db->cursor)->c_get)(db->cursor, &key, &value, DB_NEXT);
+
+ /* check for end of cursor */
+ if (RETVAL == DB_NOTFOUND) {
+ ((db->cursor)->c_close)(db->cursor) ;
+ db->cursor = NULL ;
+ }
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key)
+ }
+
+MODULE = BerkeleyDB::_tiedArray PACKAGE = BerkeleyDB::_tiedArray
+
+I32
+FETCHSIZE(db)
+ BerkeleyDB::Common db
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(db) ;
+ OUTPUT:
+ RETVAL
+
+
+MODULE = BerkeleyDB PACKAGE = BerkeleyDB
+
+BOOT:
+ {
+ SV * sv_err = perl_get_sv(ERR_BUFF, GV_ADD|GV_ADDMULTI) ;
+ SV * version_sv = perl_get_sv("BerkeleyDB::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("BerkeleyDB::db_ver", GV_ADD|GV_ADDMULTI) ;
+ int Major, Minor, Patch ;
+ (void)db_version(&Major, &Minor, &Patch) ;
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nBerkeleyDB needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ if (Major < 2 || (Major == 2 && Minor < 6))
+ {
+ croak("BerkeleyDB needs Berkeley DB 2.6 or greater. This is %d.%d.%d\n",
+ Major, Minor, Patch) ;
+ }
+ sv_setpvf(version_sv, "%d.%d", Major, Minor) ;
+ sv_setpvf(ver_sv, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(sv_err, "");
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(db_recno_t) ;
+ empty.flags = 0 ;
+
+ }
+
diff --git a/db/perl/BerkeleyDB/BerkeleyDB/Btree.pm b/db/perl/BerkeleyDB/BerkeleyDB/Btree.pm
new file mode 100644
index 000000000..ba9a9c008
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB/Btree.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Btree ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
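+
+# A rough, untested sketch of how MLDBM might use this package as its
+# backing store (the filename is only an example):
+#
+#     use MLDBM qw(BerkeleyDB::Btree) ;
+#     use BerkeleyDB ;
+#
+#     tie my %h, 'MLDBM', -Filename => 'data.db', -Flags => DB_CREATE
+#         or die "tie failed: $! $BerkeleyDB::Error\n" ;
+#     $h{fruit} = { apple => 'red', banana => 'yellow' } ;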
diff --git a/db/perl/BerkeleyDB/BerkeleyDB/Hash.pm b/db/perl/BerkeleyDB/BerkeleyDB/Hash.pm
new file mode 100644
index 000000000..8e7bc7e78
--- /dev/null
+++ b/db/perl/BerkeleyDB/BerkeleyDB/Hash.pm
@@ -0,0 +1,8 @@
+
+package BerkeleyDB::Hash ;
+
+# This file is only used for MLDBM
+
+use BerkeleyDB ;
+
+1 ;
diff --git a/db/perl/BerkeleyDB/Changes b/db/perl/BerkeleyDB/Changes
new file mode 100644
index 000000000..f647bea85
--- /dev/null
+++ b/db/perl/BerkeleyDB/Changes
@@ -0,0 +1,129 @@
+Revision history for Perl extension BerkeleyDB.
+
+0.16  1st August 2001
+ * added support for Berkeley DB 3.3.x (but no support for any of the
+ new features just yet)
+
+0.15 26 April 2001
+ * Fixed a bug in the processing of the flags options in
+ db_key_range.
+ * added support for set_lg_max & set_lg_bsize
+ * allow DB_TMP_DIR and DB_TEMP_DIR
+ * the -Filename parameter to BerkeleyDB::Queue didn't work.
+ * added symbol DB_CONSUME_WAIT
+
+0.14 21st January 2001
+	* Silenced the warnings when built with a 64-bit Perl.
+ * Can now build with DB 3.2.3h (part of MySQL). The test harness
+ takes an age to do the queue test, but it does eventually pass.
+ * Mentioned the problems that occur when perl is built with sfio.
+
+0.13 15th January 2001
+ * Added support to allow this module to build with Berkeley DB 3.2
+ * Updated dbinfo to support Berkeley DB 3.1 & 3.2 file format
+ changes.
+ * Documented the Solaris 2.7 core dump problem in README.
+ * Tidied up the test harness to fix a problem on Solaris where the
+ "fred" directory wasn't being deleted when it should have been.
+ * two calls to "open" clashed with a win32 macro.
+ * size argument for hash_cb is different for Berkeley DB 3.x
+ * Documented the issue of building on Linux.
+ * Added -Server, -CacheSize & -LockDetect options
+ [original patch supplied by Graham Barr]
+ * Added support for set_mutexlocks, c_count, set_q_extentsize,
+ key_range, c_dup
+ * Dropped the "attempted to close a Cursor with an open transaction"
+ error in c_close. The correct behaviour is that the cursor
+ should be closed before committing/aborting the transaction.
+
+0.12 2nd August 2000
+ * Serious bug with get fixed. Spotted by Sleepycat.
+ * Added hints file for Solaris & Irix (courtesy of Albert Chin-A-Young)
+
+0.11 4th June 2000
+	* When built with Berkeley DB 3.x there can be a clash with the close
+ macro.
+ * Typo in the definition of DB_WRITECURSOR
+ * The flags parameter wasn't getting sent to db_cursor
+ * Plugged small memory leak in db_cursor (DESTROY wasn't freeing
+ memory)
+ * Can be built with Berkeley DB 3.1
+
+0.10 8th December 1999
+ * The DESTROY method was missing for BerkeleyDB::Env. This resulted in
+ a memory leak. Fixed.
+ * If opening an environment or database failed, there was a small
+ memory leak. This has been fixed.
+	* A thread-enabled Perl could core when a database was closed.
+ Problem traced to the strdup function.
+
+0.09 29th November 1999
+ * the queue.t & subdb.t test harnesses were outputting a few
+ spurious warnings. This has been fixed.
+
+0.08  28th November 1999
+ * More documentation updates
+ * Changed reference to files in /tmp in examples.t
+ * Fixed a typo in softCrash that caused problems when building
+ with a thread-enabled Perl.
+ * BerkeleyDB::Error wasn't initialised properly.
+ * ANSI-ified all the static C functions in BerkeleyDB.xs
+ * Added support for the following DB 3.x features:
+ + The Queue database type
+ + db_remove
+ + subdatabases
+ + db_stat for Hash & Queue
+
+0.07 21st September 1999
+ * Numerous small bug fixes.
+ * Added support for sorting duplicate values DB_DUPSORT.
+ * Added support for DB_GET_BOTH & DB_NEXT_DUP.
+ * Added get_dup (from DB_File).
+ * beefed up the documentation.
+ * Forgot to add the DB_INIT_CDB in BerkeleyDB.pm in previous release.
+ * Merged the DBM Filter code from DB_File into BerkeleyDB.
+	* Fixed a nasty bug where a closed transaction was still used
+	  with db_put, db_get etc.
+ * Added logic to gracefully close everything whenever a fatal error
+ happens. Previously the plug was just pulled.
+ * It is now a fatal error to explicitly close an environment if there
+ is still an open database; a database when there are open cursors or
+ an open transaction; and a cursor if there is an open transaction.
+ Using object destruction doesn't have this issue, as object
+ references will ensure everything gets closed in the correct order.
+ * The BOOT code now checks that the version of db.h & libdb are the
+ same - this seems to be a common problem on Linux.
+ * MLDBM support added.
+ * Support for the new join cursor added.
+ * Builds with Berkeley DB 3.x
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+ * Deprecated the TxnMgr class. As with Berkeley DB version 3,
+ txn_begin etc are now accessed via the environment object.
+
+0.06 19 December 1998
+ * Minor modifications to get the module to build with DB 2.6.x
+ * Added support for DB 2.6.x's Concurrent Access Method, DB_INIT_CDB.
+
+0.05 9 November 1998
+ * Added a note to README about how to build Berkeley DB 2.x
+ when using HP-UX.
+ * Minor modifications to get the module to build with DB 2.5.x
+
+0.04 19 May 1998
+ * Define DEFSV & SAVE_DEFSV if not already defined. This allows
+ the module to be built with Perl 5.004_04.
+
+0.03 5 May 1998
+ * fixed db_get with DB_SET_RECNO
+ * fixed c_get with DB_SET_RECNO and DB_GET_RECNO
+ * implemented BerkeleyDB::Unknown
+	* implemented BerkeleyDB::Recno, including push, pop etc
+	* modified the txn support.
+
+0.02 30 October 1997
+ * renamed module to BerkeleyDB
+ * fixed a few bugs & added more tests
+
+0.01 23 October 1997
+ * first alpha release as BerkDB.
+
diff --git a/db/perl/BerkeleyDB/MANIFEST b/db/perl/BerkeleyDB/MANIFEST
new file mode 100644
index 000000000..ed8450f6f
--- /dev/null
+++ b/db/perl/BerkeleyDB/MANIFEST
@@ -0,0 +1,50 @@
+BerkeleyDB.pm
+BerkeleyDB.pod
+BerkeleyDB.pod.P
+BerkeleyDB.xs
+BerkeleyDB/Btree.pm
+BerkeleyDB/Hash.pm
+Changes
+config.in
+dbinfo
+hints/solaris.pl
+hints/irix_6_5.pl
+Makefile.PL
+MANIFEST
+mkconsts
+mkpod
+README
+t/btree.t
+t/db-3.0.t
+t/db-3.1.t
+t/db-3.2.t
+t/destroy.t
+t/env.t
+t/examples.t
+t/examples.t.T
+t/examples3.t
+t/examples3.t.T
+t/filter.t
+t/hash.t
+t/join.t
+t/mldbm.t
+t/queue.t
+t/recno.t
+t/strict.t
+t/subdb.t
+t/txn.t
+t/unknown.t
+t/util.pm
+Todo
+typemap
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
diff --git a/db/perl/BerkeleyDB/Makefile.PL b/db/perl/BerkeleyDB/Makefile.PL
new file mode 100644
index 000000000..c99283b20
--- /dev/null
+++ b/db/perl/BerkeleyDB/Makefile.PL
@@ -0,0 +1,123 @@
+#! perl -w
+
+# It should not be necessary to edit this file. The configuration for
+# BerkeleyDB is controlled from the file config.in
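+#
+# As a rough illustration (the paths below are examples only, not
+# defaults), entries in config.in take the form "key = value":
+#
+#     INCLUDE = /usr/local/BerkeleyDB/include
+#     LIB     = /usr/local/BerkeleyDB/lib
+#     DBNAME  = -ldb-3.1
+#
+# INCLUDE and LIB are required; DBNAME is optional and, when present,
+# replaces the default link option (-ldb, or -llibdb on Win32).
+# See ParseCONFIG below for the exact parsing rules.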
+
+
+BEGIN { die "BerkeleyDB needs Perl 5.004_04 or greater" if $] < 5.004_04 ; }
+
+use strict ;
+use ExtUtils::MakeMaker ;
+use Config ;
+
+# Check for the presence of sfio
+if ($Config{'d_sfio'}) {
+ print <<EOM;
+
+WARNING: Perl seems to have been built with SFIO support enabled.
+ Please read the SFIO Notes in the README file.
+
+EOM
+}
+
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $^O eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'BerkeleyDB',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ MAN3PODS => ' ', # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'BerkeleyDB.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2",
+ #'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz'},
+ ($] >= 5.005
+ ? (ABSTRACT_FROM => 'BerkeleyDB.pod',
+ AUTHOR => 'Paul Marquess <Paul.Marquess@btinternet.com>')
+ : ()
+ ),
+ );
+
+
+sub MY::postamble {
+ '
+$(NAME).pod: $(NAME).pod.P t/examples.t.T t/examples3.t.T mkpod
+ perl ./mkpod
+
+$(NAME).xs: typemap
+ @$(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+
+' ;
+}
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB DBNAME ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME is optional, so pretend it has been parsed.
+ delete $Parsed{'DBNAME'} ;
+
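+    # Each line in config.in is expected to be of the form "KEY = value",
+    # where KEY is INCLUDE, LIB or (optionally) DBNAME. For example
+    # (illustrative path):
+    #
+    #     INCLUDE = /usr/local/BerkeleyDB/include
+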
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'BERKELEYDB_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'BERKELEYDB_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ print "Looks Good.\n" ;
+
+}
+
+# end of file Makefile.PL
diff --git a/db/perl/BerkeleyDB/README b/db/perl/BerkeleyDB/README
new file mode 100644
index 000000000..5784a1954
--- /dev/null
+++ b/db/perl/BerkeleyDB/README
@@ -0,0 +1,464 @@
+ BerkeleyDB
+
+ Version 0.16
+
+ 1st August 2001
+
+ Copyright (c) 1997-2001 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+DESCRIPTION
+-----------
+
+BerkeleyDB is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 2 or 3. (Note: if you want
+to use version 1 of Berkeley DB with Perl you need the DB_File module).
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. BerkeleyDB provides an interface to all
+four of the database types (hash, btree, queue and recno) currently
+supported by Berkeley DB.
+
+For further details see the documentation in the file BerkeleyDB.pod.
+
+PREREQUISITES
+-------------
+
+Before you can build BerkeleyDB you need to have the following
+installed on your system:
+
+ * Perl 5.004_04 or greater.
+
+ * Berkeley DB Version 2.6.4 or greater
+
+ The official web site for Berkeley DB is http://www.sleepycat.com
+
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use BerkeleyDB
+ to access database files created by a third-party application,
+ like Sendmail. In these cases you must build BerkeleyDB with a
+ compatible version of Berkeley DB.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running Solaris 2.5, 2.7 or HP-UX 10, read the
+         Solaris Notes or HP-UX Notes section below, as appropriate.
+         If you are running Linux, please read the Linux Notes section
+         before proceeding.
+
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+INSTALLATION
+------------
+
+ make install
+
+TROUBLESHOOTING
+===============
+
+Here are some of the problems that people encounter when building BerkeleyDB.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:52: db.h: No such file or directory
+
+or this:
+
+ cc -c -I./libraries/2.7.5 -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/BerkeleyDB/BerkeleyDB.so -shared
+ -L/usr/local/lib BerkeleyDB.o
+ -L/home/paul/perl/ext/BerkDB/BerkeleyDB/libraries -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
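+
+       For example, if Berkeley DB is installed under /usr/local/BerkeleyDB
+       (an illustrative location), config.in would contain:
+
+           INCLUDE = /usr/local/BerkeleyDB/include
+           LIB = /usr/local/BerkeleyDB/lib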
+
+Wrong db.h
+----------
+
+If you get an error like this when building this module:
+
+ cc -c -I./libraries/ -Dbool=char -DHAS_BOOL -I/usr/local/include -O2
+ -DVERSION=\"0.07\" -DXS_VERSION=\"0.07\" -fpic
+ -I/usr/local/lib/perl5/5.00503/i586-linux/CORE BerkeleyDB.c
+ BerkeleyDB.xs:93: parse error before `DB_INFO'
+ BerkeleyDB.xs:93: warning: no semicolon at end of struct or union
+ BerkeleyDB.xs:94: warning: data definition has no type or storage class
+ BerkeleyDB.xs:95: parse error before `0x80000000'
+ BerkeleyDB.xs:110: parse error before `}'
+ BerkeleyDB.xs:110: warning: data definition has no type or storage class
+ BerkeleyDB.xs:117: parse error before `DB_ENV'
+ ...
+
+This error usually happens if you only have Berkeley DB version 1 on
+your system, or if you have both version 1 and version 2 (or 3) of
+Berkeley DB installed. When building, BerkeleyDB attempts to use the
+db.h for Berkeley DB version 1. This Perl module can only be built
+with Berkeley DB version 2 or 3.
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB version 2 or 3 installed on your system
+ at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB 2 or 3 installed, but it isn't in a standard
+ place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables
+ to point to the directories where libdb.a and db.h are
+ installed.
+
+Undefined Symbol: txn_stat
+--------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: txn_stat
+ at /usr/local/lib/perl5/5.00503/i586-linux/DynaLoader.pm line 169.
+ ...
+
+This error usually happens when you have both version 1 and version
+2 (or 3) of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2/3 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. BerkeleyDB can only be built with Berkeley DB version 2 or 3.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_appinit
+----------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_appinit
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 2 and the version 3
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+Undefined Symbol: db_create
+---------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00561 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_61/lib/5.00561/i586-linux
+ -I/home/paul/perl/install/5.005_61/lib/5.00561 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............Can't load 'blib/arch/auto/BerkeleyDB/BerkeleyDB.so' for
+ module BerkeleyDB: blib/arch/auto/BerkeleyDB/BerkeleyDB.so:
+ undefined symbol: db_create
+ at /home/paul/perl/install/5.005_61/lib/5.00561/i586-linux/DynaLoader.pm
+ ...
+
+This error usually happens when you have both version 2 and version
+3 of Berkeley DB installed on your system and BerkeleyDB attempts
+to build using the db.h for Berkeley DB version 3 and the version 2
+library. Unfortunately the two versions aren't compatible with each
+other.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+BerkeleyDB seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00503
+ -Iblib/arch -Iblib/lib -I/usr/local/lib/perl5/5.00503/i586-linux
+ -I/usr/local/lib/perl5/5.00503 -e 'use Test::Harness qw(&runtests $verbose);
+ $verbose=0; runtests @ARGV;' t/*.t
+ t/btree.............
+ BerkeleyDB needs compatible versions of libdb & db.h
+ you have db.h version 2.6.4 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/btree.t line 25.
+ dubious
+ Test returned status 255 (wstat 65280, 0xff00)
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want BerkeleyDB to use.
+ If you are running Linux, please read the Linux Notes section below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/BerkeleyDB/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+    that contains .o files with PIC code. To build the shared library,
+    take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
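+
+If you prefer not to leave LD_PRELOAD set permanently, one alternative
+(with a Bourne-compatible shell) is to set it only for the commands that
+need it, for example:
+
+    LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so perl Makefile.PL
+    LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so make test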
+
+
+
+Solaris 2.5 Notes
+-----------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+BerkeleyDB test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, run the command "showrev -p";
+it will display the patches that are currently installed on your system.
+
+Solaris 2.7 Notes
+-----------------
+
+If you are running Solaris 2.7 and all the tests in the test harness
+generate a core dump, try applying Sun patch 106980-09 (or better).
+
+To find out if you have the patch installed, run the command "showrev -p";
+it will display the patches that are currently installed on your system.
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building this module with the native HP-UX compiler.
+
+ ld: (Warning) At least one PA 2.0 object file (BerkeleyDB.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+ 3: Build and install the Berkeley DB distribution as usual.
+
+
+
+FEEDBACK
+--------
+
+How to report a problem with BerkeleyDB.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The complete output from running "perl -V" will tell
+ me all I need to know.
+    If your perl does not understand the "-V" option, it is too old.
+    BerkeleyDB needs Perl version 5.004_04 or better.
+
+ 2. The version of BerkeleyDB you have. If you have successfully
+ installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print "BerkeleyDB ver $BerkeleyDB::VERSION\n"'
+
+ If you haven't installed BerkeleyDB then search BerkeleyDB.pm for a
+ line like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you have installed. If you have
+ successfully installed BerkeleyDB, this one-liner will tell you:
+
+ perl -MBerkeleyDB -e 'print BerkeleyDB::DB_VERSION_STRING."\n"'
+
+ If you haven't installed BerkeleyDB then search db.h for a line
+ like this:
+
+ #define DB_VERSION_STRING
+
+ 4. If you are having problems building BerkeleyDB, send me a complete
+ log of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in
+ BerkeleyDB and you want me to fix it, you will *greatly* enhance
+ the chances of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
+
diff --git a/db/perl/BerkeleyDB/Todo b/db/perl/BerkeleyDB/Todo
new file mode 100644
index 000000000..12d53bcf9
--- /dev/null
+++ b/db/perl/BerkeleyDB/Todo
@@ -0,0 +1,57 @@
+
+ * Proper documentation.
+
+ * address or document the "close all cursors if you encounter an error" issue
+
+ * Change the $BerkeleyDB::Error to store the info in the db object,
+ if possible.
+
+ * $BerkeleyDB::db_version is documented. &db_version isn't.
+
+ * migrate perl code into the .xs file where necessary
+
+ * convert as many of the DB example files as possible to BerkeleyDB format.
+
+ * add a method to the DB object to allow access to the environment (if there
+ actually is one).
+
+
+Possibles
+
+ * use '~' magic to store the inner data.
+
+ * for the get methods, set the value to undef if the key isn't found.
+   This may be more intuitive for those folks who are used to the
+   $hash{key} interface.
+
+ * Text interface? This can be done via Recno.
+
+ * allow the base offset for Recno arrays to be either 0 or 1.
+
+ * when duplicate keys are enabled, allow db_put($key, [$val1, $val2,...])
+
+
+2.x -> 3.x Upgrade
+==================
+
+Environment Verbose
+Env->open mode
+DB cache size extra parameter
+DB->open subdatabases Done
+An empty environment causes DB->open to fail
+where is __db.001 coming from? db_remove seems to create it. Bug in 3.0.55
+Change db_strerror for 0 to ""? Done
+Queue Done
+db_stat for Hash & Queue Done
+No TxnMgr
+DB->remove
+ENV->remove
+ENV->set_verbose
+upgrade
+
+ $env = BerkeleyDB::Env::Create
+ $env = create BerkeleyDB::Env
+ $status = $env->open()
+
+ $db = BerkeleyDB::Hash::Create
+ $status = $db->open()
diff --git a/db/perl/BerkeleyDB/config.in b/db/perl/BerkeleyDB/config.in
new file mode 100644
index 000000000..1718297d8
--- /dev/null
+++ b/db/perl/BerkeleyDB/config.in
@@ -0,0 +1,51 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+#INCLUDE = /usr/local/include
+INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = ./libraries/2.7.5
+#INCLUDE = ./libraries/3.0.55
+#INCLUDE = ./libraries/3.1.17
+#INCLUDE = ./libraries/3.3.11
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+#LIB = /usr/local/lib
+LIB = /usr/local/BerkeleyDB/lib
+#LIB = ./libraries/2.7.5
+#LIB = ./libraries/3.0.55
+#LIB = ./libraries/3.1.17
+#LIB = ./libraries/3.3.11
+
+# 3. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have Berkeley DB 2.6.4 you could rename the
+# Berkeley DB library from libdb.a to libdb-2.6.4.a and change the
+# DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.6.4
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-3.0
+
+# end of file config.in
diff --git a/db/perl/BerkeleyDB/dbinfo b/db/perl/BerkeleyDB/dbinfo
new file mode 100755
index 000000000..415411aff
--- /dev/null
+++ b/db/perl/BerkeleyDB/dbinfo
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+
+# Name:	dbinfo -- identify the Berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
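+
+# Example run (illustrative; the output layout follows the heredoc above):
+#
+#   $ dbinfo mydb.db
+#   testing file mydb.db...
+#
+#   File Type:              Berkeley DB Btree file.
+#   File Version ID:        8
+#   Built with Berkeley DB: 3.1.x or greater
+#   Byte Order:             Little Endian
+#   Magic:                  053162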
diff --git a/db/perl/BerkeleyDB/hints/irix_6_5.pl b/db/perl/BerkeleyDB/hints/irix_6_5.pl
new file mode 100644
index 000000000..b531673e6
--- /dev/null
+++ b/db/perl/BerkeleyDB/hints/irix_6_5.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lthread" ];
diff --git a/db/perl/BerkeleyDB/hints/solaris.pl b/db/perl/BerkeleyDB/hints/solaris.pl
new file mode 100644
index 000000000..ddd941d63
--- /dev/null
+++ b/db/perl/BerkeleyDB/hints/solaris.pl
@@ -0,0 +1 @@
+$self->{LIBS} = [ "@{$self->{LIBS}} -lmt" ];
diff --git a/db/perl/BerkeleyDB/mkconsts b/db/perl/BerkeleyDB/mkconsts
new file mode 100644
index 000000000..24ef4fca7
--- /dev/null
+++ b/db/perl/BerkeleyDB/mkconsts
@@ -0,0 +1,211 @@
+#!/usr/bin/perl
+
+%constants = (
+ # Symbol 0 = define, 1 = enum
+ DB_AFTER => 0,
+ DB_APPEND => 0,
+ DB_ARCH_ABS => 0,
+ DB_ARCH_DATA => 0,
+ DB_ARCH_LOG => 0,
+ DB_BEFORE => 0,
+ DB_BTREE => 1,
+ DB_BTREEMAGIC => 0,
+ DB_BTREEOLDVER => 0,
+ DB_BTREEVERSION => 0,
+ DB_CHECKPOINT => 0,
+ DB_CONSUME => 0,
+ DB_CREATE => 0,
+ DB_CURLSN => 0,
+ DB_CURRENT => 0,
+ DB_DBT_MALLOC => 0,
+ DB_DBT_PARTIAL => 0,
+ DB_DBT_USERMEM => 0,
+ DB_DELETED => 0,
+ DB_DELIMITER => 0,
+ DB_DUP => 0,
+ DB_DUPSORT => 0,
+ DB_ENV_APPINIT => 0,
+ DB_ENV_STANDALONE => 0,
+ DB_ENV_THREAD => 0,
+ DB_EXCL => 0,
+ DB_FILE_ID_LEN => 0,
+ DB_FIRST => 0,
+ DB_FIXEDLEN => 0,
+ DB_FLUSH => 0,
+ DB_FORCE => 0,
+ DB_GET_BOTH => 0,
+ DB_GET_RECNO => 0,
+ DB_HASH => 1,
+ DB_HASHMAGIC => 0,
+ DB_HASHOLDVER => 0,
+ DB_HASHVERSION => 0,
+ DB_INCOMPLETE => 0,
+ DB_INIT_CDB => 0,
+ DB_INIT_LOCK => 0,
+ DB_INIT_LOG => 0,
+ DB_INIT_MPOOL => 0,
+ DB_INIT_TXN => 0,
+ DB_JOIN_ITEM => 0,
+ DB_KEYEMPTY => 0,
+ DB_KEYEXIST => 0,
+ DB_KEYFIRST => 0,
+ DB_KEYLAST => 0,
+ DB_LAST => 0,
+ DB_LOCK_CONFLICT => 0,
+ DB_LOCK_DEADLOCK => 0,
+ DB_LOCK_DEFAULT => 0,
+ DB_LOCK_GET => 1,
+ DB_LOCK_NORUN => 0,
+ DB_LOCK_NOTGRANTED => 0,
+ DB_LOCK_NOTHELD => 0,
+ DB_LOCK_NOWAIT => 0,
+ DB_LOCK_OLDEST => 0,
+ DB_LOCK_RANDOM => 0,
+ DB_LOCK_RIW_N => 0,
+ DB_LOCK_RW_N => 0,
+ DB_LOCK_YOUNGEST => 0,
+ DB_LOCKMAGIC => 0,
+ DB_LOCKVERSION => 0,
+ DB_LOGMAGIC => 0,
+ DB_LOGOLDVER => 0,
+ DB_MAX_PAGES => 0,
+ DB_MAX_RECORDS => 0,
+ DB_MPOOL_CLEAN => 0,
+ DB_MPOOL_CREATE => 0,
+ DB_MPOOL_DIRTY => 0,
+ DB_MPOOL_DISCARD => 0,
+ DB_MPOOL_LAST => 0,
+ DB_MPOOL_NEW => 0,
+ DB_MPOOL_PRIVATE => 0,
+ DB_MUTEXDEBUG => 0,
+ DB_MUTEXLOCKS => 0,
+ DB_NEEDSPLIT => 0,
+ DB_NEXT => 0,
+ DB_NEXT_DUP => 0,
+ DB_NOMMAP => 0,
+ DB_NOOVERWRITE => 0,
+ DB_NOSYNC => 0,
+ DB_NOTFOUND => 0,
+ DB_PAD => 0,
+ DB_PAGEYIELD => 0,
+ DB_POSITION => 0,
+ DB_PREV => 0,
+ DB_PRIVATE => 0,
+ DB_QUEUE => 1,
+ DB_RDONLY => 0,
+ DB_RECNO => 1,
+ DB_RECNUM => 0,
+ DB_RECORDCOUNT => 0,
+ DB_RECOVER => 0,
+ DB_RECOVER_FATAL => 0,
+ DB_REGISTERED => 0,
+ DB_RENUMBER => 0,
+ DB_RMW => 0,
+ DB_RUNRECOVERY => 0,
+ DB_SEQUENTIAL => 0,
+ DB_SET => 0,
+ DB_SET_RANGE => 0,
+ DB_SET_RECNO => 0,
+ DB_SNAPSHOT => 0,
+ DB_SWAPBYTES => 0,
+ DB_TEMPORARY => 0,
+ DB_THREAD => 0,
+ DB_TRUNCATE => 0,
+ DB_TXN_ABORT => 1,
+ DB_TXN_BACKWARD_ROLL => 1,
+ DB_TXN_CKP => 0,
+ DB_TXN_FORWARD_ROLL => 1,
+ DB_TXN_LOCK_2PL => 0,
+ DB_TXN_LOCK_MASK => 0,
+ DB_TXN_LOCK_OPTIMISTIC => 0,
+ DB_TXN_LOG_MASK => 0,
+ DB_TXN_LOG_REDO => 0,
+ DB_TXN_LOG_UNDO => 0,
+ DB_TXN_LOG_UNDOREDO => 0,
+ DB_TXN_NOSYNC => 0,
+ DB_TXN_NOWAIT => 0,
+ DB_TXN_SYNC => 0,
+ DB_TXN_OPENFILES => 1,
+ DB_TXN_REDO => 0,
+ DB_TXN_UNDO => 0,
+ DB_TXNMAGIC => 0,
+ DB_TXNVERSION => 0,
+ DB_TXN_LOCK_OPTIMIST => 0,
+ DB_UNKNOWN => 1,
+ DB_USE_ENVIRON => 0,
+ DB_USE_ENVIRON_ROOT => 0,
+ DB_VERSION_MAJOR => 0,
+ DB_VERSION_MINOR => 0,
+ DB_VERSION_PATCH => 0,
+ DB_WRITECURSOR => 0,
+ ) ;
+
+sub OutputXS
+{
+ # skip to the marker
+ if (0) {
+ while (<>)
+ {
+ last if /^MARKER/ ;
+ print ;
+ }
+ }
+
+ foreach my $key (sort keys %constants)
+ {
+ my $isEnum = $constants{$key} ;
+
+ if ($isEnum) {
+ print <<EOM
+ if (strEQ(name, "$key"))
+ return $key;
+EOM
+ }
+ else
+ {
+ print <<EOM
+ if (strEQ(name, "$key"))
+#ifdef $key
+ return $key;
+#else
+ goto not_there;
+#endif
+EOM
+ }
+
+ }
+
+ if (0) {
+ while (<>)
+ {
+ print ;
+ }
+ }
+}
+
+sub OutputPM
+{
+ # skip to the marker
+ if (0) {
+ while (<>)
+ {
+ last if /^MARKER/ ;
+ print ;
+ }
+ }
+
+ foreach my $key (sort keys %constants)
+ {
+ print "\t$key\n";
+ }
+
+ if (0) {
+ while (<>)
+ {
+ print ;
+ }
+ }
+}
+
+OutputXS() if $ARGV[0] =~ /xs/i ;
+OutputPM() if $ARGV[0] =~ /pm/i ;
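+
+# For reference, OutputXS emits one lookup stanza per constant. A "define"
+# style constant such as DB_CREATE produces C code of roughly this shape:
+#
+#     if (strEQ(name, "DB_CREATE"))
+#   #ifdef DB_CREATE
+#         return DB_CREATE;
+#   #else
+#         goto not_there;
+#   #endif
+#
+# while an enum style constant such as DB_BTREE produces just the strEQ
+# test and the return, with no #ifdef guard.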
diff --git a/db/perl/BerkeleyDB/mkpod b/db/perl/BerkeleyDB/mkpod
new file mode 100755
index 000000000..44bbf3fbf
--- /dev/null
+++ b/db/perl/BerkeleyDB/mkpod
@@ -0,0 +1,146 @@
+#!/usr/local/bin/perl5
+
+# Filename: mkpod
+#
+# Author: Paul Marquess
+
+# File types
+#
+# Macro files end with .M
+# Tagged source files end with .T
+# Output from the code ends with .O
+# Pre-Pod file ends with .P
+#
+# Tags
+#
+# ## BEGIN tagname
+# ...
+# ## END tagname
+#
+# ## 0
+# ## 1
+#
+
+# Constants
+
+$TOKEN = '##' ;
+$Verbose = 1 if $ARGV[0] =~ /^-v/i ;
+
+# Macros files first
+foreach $file (glob("*.M"))
+{
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ print " Processing Macro file $file\n" ;
+ while (<F>)
+ {
+ # Skip blank & comment lines
+ next if /^\s*$/ || /^\s*#/ ;
+
+ #
+ ($name, $expand) = split (/\t+/, $_, 2) ;
+
+ $expand =~ s/^\s*// ;
+ $expand =~ s/\s*$// ;
+
+ if ($expand =~ /\[#/ )
+ {
+ }
+
+ $Macros{$name} = $expand ;
+ }
+ close F ;
+}
+
+# Suck up all the code files
+foreach $file (glob("t/*.T"))
+{
+ ($newfile = $file) =~ s/\.T$// ;
+ open (F, "<$file") or die "Cannot open '$file':$!\n" ;
+ open (N, ">$newfile") or die "Cannot open '$newfile':$!\n" ;
+
+ print " Processing $file -> $newfile\n" ;
+
+ while ($line = <F>)
+ {
+ if ($line =~ /^$TOKEN\s*BEGIN\s+(\w+)\s*$/ or
+ $line =~ m[\s*/\*$TOKEN\s*BEGIN\s+(\w+)\s*$] )
+ {
+ print " Section $1 begins\n" if $Verbose ;
+ $InSection{$1} ++ ;
+ $Section{$1} = '' unless $Section{$1} ;
+ }
+ elsif ($line =~ /^$TOKEN\s*END\s+(\w+)\s*$/ or
+ $line =~ m[^\s*/\*$TOKEN\s*END\s+(\w+)\s*$] )
+ {
+ warn "Encountered END without a begin [$line]\n"
+ unless $InSection{$1} ;
+
+ delete $InSection{$1} ;
+ print " Section $1 ends\n" if $Verbose ;
+ }
+ else
+ {
+ print N $line ;
+ chop $line ;
+ $line =~ s/\s*$// ;
+
+ # Save the current line in each of the sections
+ foreach( keys %InSection)
+ {
+ if ($line !~ /^\s*$/ )
+ #{ $Section{$_} .= " $line" }
+ { $Section{$_} .= $line }
+ $Section{$_} .= "\n" ;
+ }
+ }
+
+ }
+
+ if (%InSection)
+ {
+ # Check for unclosed sections
+ print "The following Sections are not terminated\n" ;
+ foreach (sort keys %InSection)
+ { print "\t$_\n" }
+ exit 1 ;
+ }
+
+ close F ;
+ close N ;
+}
+
+print "\n\nCreating pod file(s)\n\n" if $Verbose ;
+
+@ppods = glob('*.P') ;
+#$ppod = $ARGV[0] ;
+#$pod = $ARGV[1] ;
+
+# Now process the pre-pod file
+foreach $ppod (@ppods)
+{
+ ($pod = $ppod) =~ s/\.P$// ;
+ open (PPOD, "<$ppod") or die "Cannot open file '$ppod': $!\n" ;
+ open (POD, ">$pod") or die "Cannot open file '$pod': $!\n" ;
+
+ print " $ppod -> $pod\n" ;
+
+ while ($line = <PPOD>)
+ {
+ if ( $line =~ /^\s*$TOKEN\s*(\w+)\s*$/)
+ {
+ warn "No code insert '$1' available\n"
+ unless $Section{$1} ;
+
+ print "Expanding section $1\n" if $Verbose ;
+ print POD $Section{$1} ;
+ }
+ else
+ {
+# $line =~ s/\[#([^\]])]/$Macros{$1}/ge ;
+ print POD $line ;
+ }
+ }
+
+ close PPOD ;
+ close POD ;
+}
diff --git a/db/perl/BerkeleyDB/patches/5.004 b/db/perl/BerkeleyDB/patches/5.004
new file mode 100644
index 000000000..143ec95af
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/db/perl/BerkeleyDB/patches/5.004_01 b/db/perl/BerkeleyDB/patches/5.004_01
new file mode 100644
index 000000000..1b05eb4e0
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/patches/5.004_02 b/db/perl/BerkeleyDB/patches/5.004_02
new file mode 100644
index 000000000..238f87379
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/patches/5.004_03 b/db/perl/BerkeleyDB/patches/5.004_03
new file mode 100644
index 000000000..06331eac9
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/db/perl/BerkeleyDB/patches/5.004_04 b/db/perl/BerkeleyDB/patches/5.004_04
new file mode 100644
index 000000000..a227dc700
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
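
Note: the Configure hunk in each of these patches strips the DBM libraries out of $libs with four sed substitutions before recording perllibs. The following is a rough Perl rendering of that step, included only as an illustration of what the shell code above computes; Configure itself does this in sh, and its set/shift idiom also renormalises whitespace, which the trim below approximates. The example value of $libs is an assumption.

    # Rough Perl equivalent of the Configure sed step shown above.
    # Like the sed expressions, each substitution removes one occurrence
    # of the corresponding DBM library from the space-padded list.
    my $libs = '-lgdbm -ldb -ldl -lm -lc -lcrypt';   # example value only
    (my $perllibs = " $libs ") =~ s/ -lndbm / /;
    $perllibs =~ s/ -lgdbm / /;
    $perllibs =~ s/ -ldbm / /;
    $perllibs =~ s/ -ldb / /;
    $perllibs =~ s/^\s+|\s+$//g;                     # trim padding
    print "$perllibs\n";                             # "-ldl -lm -lc -lcrypt"
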
diff --git a/db/perl/BerkeleyDB/patches/5.004_05 b/db/perl/BerkeleyDB/patches/5.004_05
new file mode 100644
index 000000000..51c8bf350
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/patches/5.005 b/db/perl/BerkeleyDB/patches/5.005
new file mode 100644
index 000000000..effee3e82
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/patches/5.005_01 b/db/perl/BerkeleyDB/patches/5.005_01
new file mode 100644
index 000000000..2a05dd545
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/patches/5.005_02 b/db/perl/BerkeleyDB/patches/5.005_02
new file mode 100644
index 000000000..5dd57ddc0
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
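
Note: from 5.005_02 onward the patches also touch the Liblist.pm POD for the :nodefault, :nosearch and :search markers, which after the patch refer to $Config{perllibs}. The sketch below is purely illustrative of how such markers can appear in a LIBS string on a platform whose Liblist implementation honours them (historically the Win32 one); the module name, directory and library names are assumptions, not from this tree.

    # Illustrative only: LIBS entries using the markers documented in the
    # Liblist.pm POD hunks above.  :nosearch passes the following switches
    # through without library searching, :search turns searching back on,
    # and :nodefault would suppress appending $Config{perllibs} entirely.
    use ExtUtils::MakeMaker;
    WriteMakefile(
        NAME => 'Some::XSModule',    # placeholder name
        LIBS => [':nosearch -Lc:/BerkeleyDB/lib -llibdb :search -lwsock32'],
    );
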
diff --git a/db/perl/BerkeleyDB/patches/5.005_03 b/db/perl/BerkeleyDB/patches/5.005_03
new file mode 100644
index 000000000..115f9f5b9
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/db/perl/BerkeleyDB/patches/5.6.0 b/db/perl/BerkeleyDB/patches/5.6.0
new file mode 100644
index 000000000..1f9b3b620
--- /dev/null
+++ b/db/perl/BerkeleyDB/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/BerkeleyDB/t/btree.t b/db/perl/BerkeleyDB/t/btree.t
new file mode 100644
index 000000000..c85271d99
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/btree.t
@@ -0,0 +1,925 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..243\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Btree -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Btree -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Btree -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Btree
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $db->status() == DB_NOTFOUND ;
+ ok 17, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 18, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 19, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 20, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 21, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 22, $db->db_get("key", $value) == 0 ;
+ ok 23, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 24, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home) ;
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put("some key", "some value") == 0 ;
+ ok 31, $db->db_get("some key", $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'};
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'};
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 46, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 47, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 48, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 49, tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 50, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 51, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 52, (tied %hash)->status() == 0 ;
+ ok 53, $hash{"some key"} eq "some value";
+ ok 54, defined $hash{"some key"} ;
+ ok 55, (tied %hash)->status() == 0 ;
+ ok 56, exists $hash{"some key"} ;
+ ok 57, !defined $hash{"jimmy"} ;
+ ok 58, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 59, !exists $hash{"jimmy"} ;
+ ok 60, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, ! defined $hash{"some key"} ;
+ ok 63, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 64, ! exists $hash{"some key"} ;
+ ok 65, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 66, $count == 3 ;
+ ok 67, $keys == 1011 ;
+ ok 68, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 69, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # override default compare
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+ ok 70, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 71, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Flags => DB_CREATE ;
+
+ ok 72, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+ }
+
+ sub ArrayCompare
+ {
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+ }
+
+ ok 73, ArrayCompare (\@srt_1, [keys %h]);
+ ok 74, ArrayCompare (\@srt_2, [keys %g]);
+ ok 75, ArrayCompare (\@srt_3, [keys %k]);
+
+}
+
+{
+ # override default compare, with duplicates, don't sort values
+ my $lex = new LexFile $Dfile, $Dfile2, $Dfile3 ;
+ my $value ;
+ my (%h, %g, %k) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 def ) ;
+ my @Values = qw( 1 0 3 dd x abc 0 ) ;
+ ok 76, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 77, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 78, tie %k, 'BerkeleyDB::Btree', -Filename => $Dfile3,
+ -Compare => sub { length $_[0] <=> length $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+ my @srt_3 = sort { length $a <=> length $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ $k{$_} = $value ;
+ }
+
+ sub getValues
+ {
+ my $hash = shift ;
+ my $db = tied %$hash ;
+ my $cursor = $db->db_cursor() ;
+ my @values = () ;
+ my ($k, $v) = (0,0) ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ push @values, $v ;
+ }
+ return @values ;
+ }
+
+ ok 79, ArrayCompare (\@srt_1, [keys %h]);
+ ok 80, ArrayCompare (\@srt_2, [keys %g]);
+ ok 81, ArrayCompare (\@srt_3, [keys %k]);
+ ok 82, ArrayCompare ([qw(dd 0 0 x 3 1 abc)], [getValues \%h]);
+ ok 83, ArrayCompare ([qw(dd 1 0 3 x abc 0)], [getValues \%g]);
+ ok 84, ArrayCompare ([qw(0 x 3 0 1 dd abc)], [getValues \%k]);
+
+    # test DB_NEXT_DUP
+ ok 85, my $cur = (tied %g)->db_cursor() ;
+ my ($k, $v) = (9, "") ;
+ ok 86, $cur->c_get($k, $v, DB_SET) == 0 ;
+ ok 87, $k == 9 && $v == 0 ;
+ ok 88, $cur->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 89, $k == 9 && $v eq "x" ;
+ ok 90, $cur->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+}
+
+{
+ # override default compare, with duplicates, sort values
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my $value ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+ ok 91, tie %h, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -Compare => sub { $_[0] <=> $_[1] },
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ ok 92, tie %g, 'BerkeleyDB::Btree', -Filename => $Dfile2,
+ -Compare => sub { $_[0] cmp $_[1] },
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP,
+
+
+
+ -Flags => DB_CREATE ;
+
+ my @srt_1 ;
+ { local $^W = 0 ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+ }
+ my @srt_2 = sort { $a cmp $b } @Keys ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
+ ok 93, ArrayCompare (\@srt_1, [keys %h]);
+ ok 94, ArrayCompare (\@srt_2, [keys %g]);
+ ok 95, ArrayCompare ([qw(dd 1 3 x 2 11 abc 0)], [getValues \%g]);
+ ok 96, ArrayCompare ([qw(dd 0 11 2 x 3 1 abc)], [getValues \%h]);
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 97, my $YY = tie %hh, "BerkeleyDB::Btree", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 98, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 99, scalar $YY->get_dup('Smith') == 1 ;
+ ok 100, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 101, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 102, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 103, (@wall == 3 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 104, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 105, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 106, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 107, my $db = tie %hash, 'BerkeleyDB::Btree' ;
+
+ ok 108, $db->db_put("some key", "some value") == 0 ;
+ ok 109, $db->db_get("some key", $value) == 0 ;
+ ok 110, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ ok 111, my $db = new BerkeleyDB::Btree, -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 112, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 113, ! $pon && $off == 0 && $len == 0 ;
+ ok 114, $db->db_get("red", $value) == 0 && $value eq "bo" ;
+ ok 115, $db->db_get("green", $value) == 0 && $value eq "ho" ;
+ ok 116, $db->db_get("blue", $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 117, $pon ;
+ ok 118, $off == 0 ;
+ ok 119, $len == 2 ;
+ ok 120, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 121, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 122, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 3 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 127, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 128, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 129, $db->db_put("red", "") == 0 ;
+ ok 130, $db->db_put("green", "AB") == 0 ;
+ ok 131, $db->db_put("blue", "XYZ") == 0 ;
+ ok 132, $db->db_put("new", "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 133, $pon ;
+ ok 134, $off == 0 ;
+ ok 135, $len == 2 ;
+ ok 136, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 137, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 138, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 139, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 140, ! $pon ;
+ ok 141, $off == 0 ;
+ ok 142, $len == 0 ;
+ ok 143, $db->db_put("red", "PPP") == 0 ;
+ ok 144, $db->db_put("green", "Q") == 0 ;
+ ok 145, $db->db_put("blue", "XYZ") == 0 ;
+ ok 146, $db->db_put("new", "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 147, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 148, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 149, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 150, $db->db_get("new", $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 151, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 152, $hash{"red"} eq "bo" ;
+ ok 153, $hash{"green"} eq "ho" ;
+ ok 154, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 155, $hash{"red"} eq "t" ;
+ ok 156, $hash{"green"} eq "se" ;
+ ok 157, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 158, $hash{"red"} eq "boat" ;
+ ok 159, $hash{"green"} eq "house" ;
+ ok 160, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 161, $hash{"red"} = "" ;
+ ok 162, $hash{"green"} = "AB" ;
+ ok 163, $hash{"blue"} = "XYZ" ;
+ ok 164, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 165, $hash{"red"} eq "at" ;
+ ok 166, $hash{"green"} eq "ABuse" ;
+ ok 167, $hash{"blue"} eq "XYZa" ;
+ ok 168, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 169, $hash{"red"} = "PPP" ;
+ ok 170, $hash{"green"} = "Q" ;
+ ok 171, $hash{"blue"} = "XYZ" ;
+ ok 172, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 173, $hash{"red"} eq "at\0PPP" ;
+ ok 174, $hash{"green"} eq "ABuQ" ;
+ ok 175, $hash{"blue"} eq "XYZXYZ" ;
+ ok 176, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 177, my $lexD = new LexDir($home) ;
+ ok 178, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 179, my $txn = $env->txn_begin() ;
+ ok 180, my $db1 = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 181, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 182, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 183, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ #ok 151, $txn->txn_abort() == 0 ;
+ ok 184, (my $Z = $txn->txn_abort()) == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 185, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 186, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 187, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 188, keys %hash == 6 ;
+
+ # create a cursor
+ ok 189, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 190, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 191, $key eq "Wall" && $value eq "Larry" ;
+ ok 192, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 193, $key eq "Wall" && $value eq "Stone" ;
+ ok 194, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 195, $key eq "Wall" && $value eq "Brick" ;
+ ok 196, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 197, $key eq "Wall" && $value eq "Brick" ;
+
+ my $ref = $db->db_stat() ;
+ ok 198, ($ref->{bt_flags} | DB_DUP) == DB_DUP ;
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 199, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Minkey =>3 ,
+ -Pagesize => 2 **12
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 200, $ref->{$recs} == 0;
+ ok 201, $ref->{'bt_minkey'} == 3;
+ ok 202, $ref->{'bt_pagesize'} == 2 ** 12;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 203, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 204, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Btree);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 205, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 206, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 211, $@ eq "" ;
+ main::ok 212, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 213, $@ eq "" ;
+ main::ok 214, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_RECNUM, DB_SET_RECNO & DB_GET_RECNO
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) = ("", "");
+ ok 215, my $db = new BerkeleyDB::Btree
+ -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Property => DB_RECNUM ;
+
+
+ # create some data
+ my @data = (
+ "A zero",
+ "B one",
+ "C two",
+ "D three",
+ "E four"
+ ) ;
+
+ my $ix = 0 ;
+ my $ret = 0 ;
+ foreach (@data) {
+ $ret += $db->db_put($_, $ix) ;
+ ++ $ix ;
+ }
+ ok 216, $ret == 0 ;
+
+ # db_get & DB_SET_RECNO
+ $k = 1 ;
+ ok 217, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 218, $k eq "B one" && $v == 1 ;
+
+ $k = 3 ;
+ ok 219, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 220, $k eq "D three" && $v == 3 ;
+
+ $k = 4 ;
+ ok 221, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 222, $k eq "E four" && $v == 4 ;
+
+ $k = 0 ;
+ ok 223, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 224, $k eq "A zero" && $v == 0 ;
+
+ # cursor & DB_SET_RECNO
+
+ # create the cursor
+ ok 225, my $cursor = $db->db_cursor() ;
+
+ $k = 2 ;
+ ok 226, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 227, $k eq "C two" && $v == 2 ;
+
+ $k = 0 ;
+ ok 228, $cursor->c_get($k, $v, DB_SET_RECNO) == 0;
+ ok 229, $k eq "A zero" && $v == 0 ;
+
+ $k = 3 ;
+ ok 230, $db->db_get($k, $v, DB_SET_RECNO) == 0;
+ ok 231, $k eq "D three" && $v == 3 ;
+
+ # cursor & DB_GET_RECNO
+ ok 232, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 233, $k eq "A zero" && $v == 0 ;
+ ok 234, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 235, $v == 0 ;
+
+ ok 236, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 237, $k eq "B one" && $v == 1 ;
+ ok 238, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 239, $v == 1 ;
+
+ ok 240, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 241, $k eq "E four" && $v == 4 ;
+ ok 242, $cursor->c_get($k, $v, DB_GET_RECNO) == 0;
+ ok 243, $v == 4 ;
+
+}
+
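The partial_set/partial_clear sequence exercised above (tests 111-150) is the least obvious part of this API: partial_set($offset, $length) makes later db_get calls return only that byte range of a record and makes later db_put calls overwrite only that range, and partial_clear restores whole-record behaviour. A minimal, self-contained sketch of the same calls, using only methods that appear in the test above; the scratch filename is illustrative and not part of the test suite.

use strict ;
use BerkeleyDB ;

my $file = "partial-demo.tmp" ;       # illustrative scratch file
unlink $file ;
my $db = new BerkeleyDB::Btree -Filename => $file, -Flags => DB_CREATE
    or die "Cannot open $file: $! $BerkeleyDB::Error\n" ;

$db->db_put("red", "boat") ;

# Read only bytes 0..1 of each record.
$db->partial_set(0, 2) ;
my $value ;
$db->db_get("red", $value) ;          # $value is "bo"

# Overwrite only bytes 0..1, leaving the rest of the record intact.
$db->db_put("red", "XY") ;            # record becomes "XYat"

# Back to whole-record mode.
$db->partial_clear() ;
$db->db_get("red", $value) ;          # $value is "XYat"

undef $db ;
unlink $file ;
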
diff --git a/db/perl/BerkeleyDB/t/db-3.0.t b/db/perl/BerkeleyDB/t/db-3.0.t
new file mode 100644
index 000000000..971a5e830
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/db-3.0.t
@@ -0,0 +1,89 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipped - this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..14\n";
+
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # set_mutexlocks
+
+ my $home = "./fred" ;
+ ok 1, my $lexD = new LexDir($home) ;
+ chdir "./fred" ;
+ ok 2, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ ok 3, $env->set_mutexlocks(0) == 0 ;
+ chdir ".." ;
+ undef $env ;
+}
+
+{
+ # c_dup
+
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 4, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 5, $ret == 0 ;
+
+ # create a cursor
+ ok 6, my $cursor = $db->db_cursor() ;
+
+ # point to a specific k/v pair
+ $k = "green" ;
+ ok 7, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 8, $v eq "house" ;
+
+ # duplicate the cursor
+ my $dup_cursor = $cursor->c_dup(DB_POSITION);
+ ok 9, $dup_cursor ;
+
+ # move original cursor off green/house
+ $cursor->c_get($k, $v, DB_NEXT) ;
+ ok 10, $k ne "green" ;
+ ok 11, $v ne "house" ;
+
+ # duplicate cursor should still be on green/house
+ ok 12, $dup_cursor->c_get($k, $v, DB_CURRENT) == 0;
+ ok 13, $k eq "green" ;
+ ok 14, $v eq "house" ;
+
+}
diff --git a/db/perl/BerkeleyDB/t/db-3.1.t b/db/perl/BerkeleyDB/t/db-3.1.t
new file mode 100644
index 000000000..d01681e54
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/db-3.1.t
@@ -0,0 +1,123 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.1) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.1.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..25\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # c_count
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 1, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 2, keys %hash == 6 ;
+
+ # create a cursor
+ ok 3, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 4, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 5, $key eq "Wall" && $value eq "Larry" ;
+
+ my $count ;
+ ok 6, $cursor->c_count($count) == 0 ;
+ ok 7, $count == 4 ;
+
+ $key = "Smith" ;
+ ok 8, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 9, $key eq "Smith" && $value eq "John" ;
+
+ ok 10, $cursor->c_count($count) == 0 ;
+ ok 11, $count == 1 ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # db_key_range
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 12, my $db = tie %hash, 'BerkeleyDB::Btree', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 13, keys %hash == 6 ;
+
+ my $key = "Wall" ;
+ my ($less, $equal, $greater) ;
+ ok 14, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 15, $less != 0 ;
+ ok 16, $equal != 0 ;
+ ok 17, $greater != 0 ;
+
+ $key = "Smith" ;
+ ok 18, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 19, $less == 0 ;
+ ok 20, $equal != 0 ;
+ ok 21, $greater != 0 ;
+
+ $key = "NotThere" ;
+ ok 22, $db->db_key_range($key, $less, $equal, $greater) == 0 ;
+
+ ok 23, $less == 0 ;
+ ok 24, $equal == 0 ;
+ ok 25, $greater == 1 ;
+
+ undef $db ;
+ untie %hash ;
+
+}
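For context on the db_key_range assertions above: the three values filled in are Berkeley DB's estimates of the proportion of keys that sort before, equal to, and after the supplied key, each between 0 and 1, which is why a key that precedes every entry ends up with $less == 0, $equal == 0 and $greater == 1. A small sketch under that assumption (Btree databases, Berkeley DB 3.1 or later); the filename is illustrative.

use strict ;
use BerkeleyDB ;

my $file = "keyrange-demo.tmp" ;      # illustrative scratch file
unlink $file ;
my $db = new BerkeleyDB::Btree -Filename => $file, -Flags => DB_CREATE
    or die "Cannot open $file: $! $BerkeleyDB::Error\n" ;

$db->db_put($_, 1) for qw(apple banana cherry damson) ;

my ($less, $equal, $greater) ;
$db->db_key_range("cherry", $less, $equal, $greater) == 0
    or die "db_key_range failed: $BerkeleyDB::Error\n" ;

# The three outputs are fractions of the keyspace, summing to roughly 1;
# here about half of the keys sort before "cherry".
printf "less=%.2f equal=%.2f greater=%.2f\n", $less, $equal, $greater ;

undef $db ;
unlink $file ;
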
diff --git a/db/perl/BerkeleyDB/t/db-3.2.t b/db/perl/BerkeleyDB/t/db-3.2.t
new file mode 100644
index 000000000..1f7ad1504
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/db-3.2.t
@@ -0,0 +1,42 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.2) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.2.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..1\n";
+
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+
+{
+ # set_q_extentsize
+
+ ok 1, 1 ;
+}
+
diff --git a/db/perl/BerkeleyDB/t/destroy.t b/db/perl/BerkeleyDB/t/destroy.t
new file mode 100644
index 000000000..c060bc744
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/destroy.t
@@ -0,0 +1,101 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..13\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+    # let object destruction kill everything
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ ok 1, my $lexD = new LexDir($home) ;
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 3, my $txn = $env->txn_begin() ;
+ ok 4, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 5, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 6, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 7, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 8, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 9, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 10, $count == 0 ;
+
+ #undef $txn ;
+ #undef $cursor ;
+ #undef $db1 ;
+ #undef $env ;
+ #untie %hash ;
+
+}
+{
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $cursor ;
+ my ($k, $v) = ("", "") ;
+ ok 11, my $db1 = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ my $count = 0 ;
+ # sequence forwards
+ ok 12, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 13, $count == 0 ;
+}
+
+
diff --git a/db/perl/BerkeleyDB/t/env.t b/db/perl/BerkeleyDB/t/env.t
new file mode 100644
index 000000000..d6ddc9860
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/env.t
@@ -0,0 +1,233 @@
+#!./perl -w
+
+# ID: 1.2, 7/17/97
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..52\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # db version stuff
+ my ($major, $minor, $patch) = (0, 0, 0) ;
+
+ ok 1, my $VER = BerkeleyDB::DB_VERSION_STRING ;
+ ok 2, my $ver = BerkeleyDB::db_version($major, $minor, $patch) ;
+ ok 3, $VER eq $ver ;
+ ok 4, $major > 1 ;
+ ok 5, defined $minor ;
+ ok 6, defined $patch ;
+}
+
+{
+ # Check for invalid parameters
+ my $env ;
+ eval ' $env = new BerkeleyDB::Env( -Stupid => 3) ; ' ;
+ ok 7, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $env = new BerkeleyDB::Env( -Bad => 2, -Home => "/tmp", -Stupid => 3) ; ' ;
+ ok 8, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $env = new BerkeleyDB::Env (-Config => {"fred" => " "} ) ; ' ;
+ ok 9, !$env ;
+ ok 10, $BerkeleyDB::Error =~ /^illegal name-value pair/ ;
+}
+
+{
+ # create a very simple environment
+ my $home = "./fred" ;
+ ok 11, my $lexD = new LexDir($home) ;
+ chdir "./fred" ;
+ ok 12, my $env = new BerkeleyDB::Env -Flags => DB_CREATE ;
+ chdir ".." ;
+ undef $env ;
+}
+
+{
+ # create an environment with a Home
+ my $home = "./fred" ;
+ ok 13, my $lexD = new LexDir($home) ;
+ ok 14, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE ;
+
+ undef $env ;
+}
+
+{
+ # make new fail.
+ my $home = "./not_there" ;
+ rmtree $home ;
+ ok 15, ! -d $home ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_INIT_LOCK ;
+ ok 16, ! $env ;
+ ok 17, $! != 0 ;
+
+ rmtree $home ;
+}
+
+{
+ # Config
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 18, my $lexD = new LexDir($home) ;
+ ok 19, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 20, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 21, $env ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+
+ my %hash ;
+ ok 23, tie %hash, 'BerkeleyDB::Hash', -Filename => $data_file,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ $hash{"abc"} = 123 ;
+ $hash{"def"} = 456 ;
+
+ $txn->txn_commit() ;
+
+ untie %hash ;
+
+ undef $txn ;
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filename
+ my $errfile = "./errfile" ;
+ my $home = "./fred" ;
+ ok 24, my $lexD = new LexDir($home) ;
+ my $lex = new LexFile $errfile ;
+ ok 25, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 26, !$db ;
+
+ ok 27, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ ok 28, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 29, $BerkeleyDB::Error eq $contents ;
+
+ undef $env ;
+}
+
+{
+ # -ErrFile with a filehandle
+ use IO ;
+ my $home = "./fred" ;
+ ok 30, my $lexD = new LexDir($home) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 31, my $ef = new IO::File ">$errfile" ;
+ ok 32, my $env = new BerkeleyDB::Env( -ErrFile => $ef ,
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 33, !$db ;
+
+ ok 34, $BerkeleyDB::Error =~ /^illegal flag specified to (db_open|DB->open)/;
+ $ef->close() ;
+ ok 35, -e $errfile ;
+ my $contents = "" ;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 36, $BerkeleyDB::Error eq $contents ;
+ undef $env ;
+}
+
+{
+ # -ErrPrefix
+ use IO ;
+ my $home = "./fred" ;
+ ok 37, my $lexD = new LexDir($home) ;
+ my $errfile = "./errfile" ;
+ my $lex = new LexFile $errfile ;
+ ok 38, my $env = new BerkeleyDB::Env( -ErrFile => $errfile,
+ -ErrPrefix => "PREFIX",
+ -Flags => DB_CREATE,
+ -Home => $home) ;
+ my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 39, !$db ;
+
+ ok 40, $BerkeleyDB::Error =~ /^PREFIX: illegal flag specified to (db_open|DB->open)/;
+ ok 41, -e $errfile ;
+ my $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 42, $BerkeleyDB::Error eq $contents ;
+
+ # change the prefix on the fly
+ my $old = $env->errPrefix("NEW ONE") ;
+ ok 43, $old eq "PREFIX" ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => -1;
+ ok 44, !$db ;
+ ok 45, $BerkeleyDB::Error =~ /^NEW ONE: illegal flag specified to (db_open|DB->open)/;
+ $contents = docat($errfile) ;
+ chomp $contents ;
+ ok 46, $contents =~ /$BerkeleyDB::Error$/ ;
+ undef $env ;
+}
+
+{
+ # test db_appexit
+ use Cwd ;
+ my $cwd = cwd() ;
+ my $home = "$cwd/fred" ;
+ my $data_dir = "$home/data_dir" ;
+ my $log_dir = "$home/log_dir" ;
+ my $data_file = "data.db" ;
+ ok 47, my $lexD = new LexDir($home);
+ ok 48, -d $data_dir ? chmod 0777, $data_dir : mkdir($data_dir, 0777) ;
+ ok 49, -d $log_dir ? chmod 0777, $log_dir : mkdir($log_dir, 0777) ;
+ my $env = new BerkeleyDB::Env -Home => $home,
+ -Config => { DB_DATA_DIR => $data_dir,
+ DB_LOG_DIR => $log_dir
+ },
+ -Flags => DB_CREATE|DB_INIT_TXN|DB_INIT_LOG|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 50, $env ;
+
+ ok 51, my $txn_mgr = $env->TxnMgr() ;
+
+ ok 52, $env->db_appexit() == 0 ;
+
+}
+
+# test -Verbose
+# test -Flags
+# db_value_set
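The -ErrFile and -ErrPrefix tests above rely on one behaviour: when a call fails, the library's message is placed in $BerkeleyDB::Error (with the prefix prepended, if one is set) and the same line is appended to the ErrFile. A condensed sketch of that error-capture pattern, using only calls that appear in env.t; the paths are illustrative.

use strict ;
use BerkeleyDB ;

my $home    = "./errdemo" ;           # illustrative environment home
my $errfile = "./errdemo.log" ;       # illustrative error log
mkdir $home, 0777 unless -d $home ;

my $env = new BerkeleyDB::Env -Home      => $home,
                              -ErrFile   => $errfile,
                              -ErrPrefix => "demo",
                              -Flags     => DB_CREATE
    or die "Cannot create environment: $BerkeleyDB::Error\n" ;

# Force a failure: -1 is not a legal flags value for a database open.
my $db = new BerkeleyDB::Hash -Filename => "bogus.db",
                              -Env      => $env,
                              -Flags    => -1 ;

# $BerkeleyDB::Error now holds the prefixed message
# ("demo: illegal flag specified ..."), and the same line
# has been appended to $errfile.
print "open failed: $BerkeleyDB::Error\n" unless $db ;

undef $env ;
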
diff --git a/db/perl/BerkeleyDB/t/examples.t b/db/perl/BerkeleyDB/t/examples.t
new file mode 100644
index 000000000..4cd7b79fc
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/examples.t
@@ -0,0 +1,412 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..7\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot Open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/db/perl/BerkeleyDB/t/examples.t.T b/db/perl/BerkeleyDB/t/examples.t.T
new file mode 100644
index 000000000..fe0922318
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/examples.t.T
@@ -0,0 +1,496 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+print "1..7\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $x = $BerkeleyDB::Error;
+my $redirect = "xyzt" ;
+ {
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash
+ use strict ;
+ use BerkeleyDB ;
+ use vars qw( %h $k $v ) ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ tie %h, "BerkeleyDB::Hash",
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+## END simpleHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN simpleHash2
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("apple", "red") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("banana", "yellow") ;
+ $db->db_put("tomato", "red") ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $db->db_get("banana", $v) == 0;
+
+ # Delete a key/value pair.
+ $db->db_del("apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END simpleHash2
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSimple
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSimple
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(3, docat_del($redirect) eq <<'EOM') ;
+Smith
+Wall
+mouse
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN btreeSortOrder
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "tree" ;
+ unlink $filename ;
+ my %h ;
+ tie %h, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Compare => sub { lc $_[0] cmp lc $_[1] }
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+## END btreeSortOrder
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(4, docat_del($redirect) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN nullFilter
+ use strict ;
+ use BerkeleyDB ;
+
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END nullFilter
+ $db = tie %hash, 'BerkeleyDB::Hash',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(5, docat_del($redirect) eq <<"EOM") ;
+abc\x00 -> def\x00
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN intFilter
+ use strict ;
+ use BerkeleyDB ;
+ my %hash ;
+ my $filename = "filt.db" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+## END intFilter
+ $db = tie %hash, 'BerkeleyDB::Btree',
+ -Filename => $filename,
+ -Flags => DB_CREATE
+ or die "Cannot Open $filename: $!\n" ;
+ while (($k, $v) = each %hash)
+ { print "$k -> $v\n" }
+ undef $db ;
+ untie %hash ;
+
+ unlink $filename ;
+ }
+
+ my $val = pack("i", 123) ;
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(6, docat_del($redirect) eq <<"EOM") ;
+$val -> def
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ if ($FA) {
+## BEGIN simpleRecno
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ untie @h ;
+## END simpleRecno
+ unlink $filename ;
+ } else {
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $db = tie @h, 'BerkeleyDB::Recno',
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_RENUMBER
+ or die "Cannot open $filename: $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $db->push("green", "black") ;
+
+ my $elements = $db->length() ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $db->pop ;
+ print "popped $last\n" ;
+
+ $db->unshift("white") ;
+ my $first = $db->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ undef $db ;
+ untie @h ;
+ unlink $filename ;
+ }
+
+ }
+
+ #print "[" . docat($redirect) . "]\n" ;
+ ok(7, docat_del($redirect) eq <<"EOM") ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+EOM
+
+}
+
diff --git a/db/perl/BerkeleyDB/t/examples3.t b/db/perl/BerkeleyDB/t/examples3.t
new file mode 100644
index 000000000..72b68eb38
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/examples3.t
@@ -0,0 +1,132 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/db/perl/BerkeleyDB/t/examples3.t.T b/db/perl/BerkeleyDB/t/examples3.t.T
new file mode 100644
index 000000000..573c04903
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/examples3.t.T
@@ -0,0 +1,217 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use File::Path qw(rmtree);
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+
+print "1..2\n";
+
+my $FA = 0 ;
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+my $redirect = "xyzt" ;
+
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(1, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> banana
+green -> apple
+EOM
+
+}
+
+{
+my $redirect = "xyzt" ;
+ {
+
+ my $redirectObj = new Redirect $redirect ;
+
+## BEGIN dupSortHash
+ use strict ;
+ use BerkeleyDB ;
+
+ my $filename = "fruit" ;
+ unlink $filename ;
+ my $db = new BerkeleyDB::Hash
+ -Filename => $filename,
+ -Flags => DB_CREATE,
+ -Property => DB_DUP | DB_DUPSORT
+ or die "Cannot open file $filename: $! $BerkeleyDB::Error\n" ;
+
+ # Add a few key/value pairs to the file
+ $db->db_put("red", "apple") ;
+ $db->db_put("orange", "orange") ;
+ $db->db_put("green", "banana") ;
+ $db->db_put("yellow", "banana") ;
+ $db->db_put("red", "tomato") ;
+ $db->db_put("green", "apple") ;
+
+ # print the contents of the file
+ my ($k, $v) = ("", "") ;
+ my $cursor = $db->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { print "$k -> $v\n" }
+
+ undef $cursor ;
+ undef $db ;
+## END dupSortHash
+ unlink $filename ;
+ }
+
+ #print "[" . docat($redirect) . "]" ;
+ ok(2, docat_del($redirect) eq <<'EOM') ;
+orange -> orange
+yellow -> banana
+red -> apple
+red -> tomato
+green -> apple
+green -> banana
+EOM
+
+}
+
+
diff --git a/db/perl/BerkeleyDB/t/filter.t b/db/perl/BerkeleyDB/t/filter.t
new file mode 100644
index 000000000..47a7c107a
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/filter.t
@@ -0,0 +1,217 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..46\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+{
+ # DBM Filter tests
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok 1, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
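+    # Each filter simply records the value of $_ it was handed, so
+    # checkOutput can verify which filters fired for a given operation.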
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok 2, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 3, $h{"fred"} eq "joe";
+ # fk sk fv sv
+ ok 4, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 5, $db->FIRSTKEY() eq "fred" ;
+ # fk sk fv sv
+ ok 6, checkOutput( "fred", "", "", "") ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok 7, checkOutput( "", "fred", "", "Jxe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 8, $h{"Fred"} eq "[Jxe]";
+ # fk sk fv sv
+ ok 9, checkOutput( "", "fred", "[Jxe]", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 10, $db->FIRSTKEY() eq "FRED" ;
+ # fk sk fv sv
+ ok 11, checkOutput( "FRED", "", "", "") ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 12, checkOutput( "", "fred", "", "joe") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 13, $h{"fred"} eq "joe";
+ ok 14, checkOutput( "", "fred", "joe", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 15, $db->FIRSTKEY() eq "fred" ;
+ ok 16, checkOutput( "fred", "", "", "") ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok 17, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 18, $h{"fred"} eq "joe";
+ ok 19, checkOutput( "", "", "", "") ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok 20, $db->FIRSTKEY() eq "fred" ;
+ ok 21, checkOutput( "", "", "", "") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok 22, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
+ my %result = () ;
+
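+    # Closure() builds a filter sub that counts its calls and remembers
+    # every value it has been passed, recording the history in %result.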
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok 23, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 24, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 25, ! defined $result{"fetch key"} ;
+ ok 26, ! defined $result{"fetch value"} ;
+ ok 27, $_ eq "original" ;
+
+ ok 28, $db->FIRSTKEY() eq "fred" ;
+ ok 29, $result{"store key"} eq "store key - 1: [fred]" ;
+ ok 30, $result{"store value"} eq "store value - 1: [joe]" ;
+ ok 31, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 32, ! defined $result{"fetch value"} ;
+ ok 33, $_ eq "original" ;
+
+ $h{"jim"} = "john" ;
+ ok 34, $result{"store key"} eq "store key - 2: [fred jim]" ;
+ ok 35, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 36, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 37, ! defined $result{"fetch value"} ;
+ ok 38, $_ eq "original" ;
+
+ ok 39, $h{"fred"} eq "joe" ;
+ ok 40, $result{"store key"} eq "store key - 3: [fred jim fred]" ;
+ ok 41, $result{"store value"} eq "store value - 2: [joe john]" ;
+ ok 42, $result{"fetch key"} eq "fetch key - 1: [fred]" ;
+ ok 43, $result{"fetch value"} eq "fetch value - 1: [joe]" ;
+ ok 44, $_ eq "original" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok 45, $db = tie %h, 'BerkeleyDB::Hash',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE;
+
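+    # A store-key filter that reads from the same tied hash re-enters
+    # filter_store_key; BerkeleyDB should detect this and croak rather
+    # than loop forever.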
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok 46, $@ =~ /^BerkeleyDB Aborting: recursion detected in filter_store_key at/ ;
+ #print "[$@]\n" ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
diff --git a/db/perl/BerkeleyDB/t/hash.t b/db/perl/BerkeleyDB/t/hash.t
new file mode 100644
index 000000000..661de1763
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/hash.t
@@ -0,0 +1,725 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..210\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Hash -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Hash -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Hash -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to HASH
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put("some key", "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get("some key", $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+ ok 12, $db->db_get("key", $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del("some key") == 0 ;
+ ok 15, ($status = $db->db_get("some key", $value)) == DB_NOTFOUND ;
+ ok 16, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 17, $db->status() == DB_NOTFOUND ;
+ ok 18, $db->status() eq $DB_errors{'DB_NOTFOUND'};
+
+ ok 19, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 20, $db->db_put( 'key', 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 21, $db->status() eq $DB_errors{'DB_KEYEXIST'};
+ ok 22, $db->status() == DB_KEYEXIST ;
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 23, $db->db_get("key", $value) == 0 ;
+ ok 24, $value eq "value" ;
+
+ # test DB_GET_BOTH
+ my ($k, $v) = ("key", "value") ;
+ ok 25, $db->db_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("key", "fred") ;
+ ok 26, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("another", "value") ;
+ ok 27, $db->db_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+
+}
+
+{
+ # Check simple env works with a hash.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 28, my $lexD = new LexDir($home);
+
+ ok 29, my $env = new BerkeleyDB::Env -Flags => DB_CREATE| DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 30, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 31, $db->db_put("some key", "some value") == 0 ;
+ ok 32, $db->db_get("some key", $value) == 0 ;
+ ok 33, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+{
+ # override default hash
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ $::count = 0 ;
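+    # -Hash installs a user-supplied hashing callback; $::count shows
+    # that the callback really gets used.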
+ ok 34, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Hash => sub { ++$::count ; length $_[0] },
+ -Flags => DB_CREATE ;
+
+ ok 35, $db->db_put("some key", "some value") == 0 ;
+ ok 36, $db->db_get("some key", $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $::count > 0 ;
+
+}
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my ($k, $v) ;
+ ok 39, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => 2,
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 40, $ret == 0 ;
+
+ # create the cursor
+ ok 41, my $cursor = $db->db_cursor() ;
+
+ $k = $v = "" ;
+ my %copy = %data ;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 42, $cursor->status() == DB_NOTFOUND ;
+ ok 43, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 46, $status == DB_NOTFOUND ;
+ ok 47, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 48, $cursor->status() == $status ;
+ ok 49, $cursor->status() eq $status ;
+ ok 50, keys %copy == 0 ;
+ ok 51, $extras == 0 ;
+
+ ($k, $v) = ("green", "house") ;
+ ok 52, $cursor->c_get($k, $v, DB_GET_BOTH) == 0 ;
+
+ ($k, $v) = ("green", "door") ;
+ ok 53, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+ ($k, $v) = ("black", "house") ;
+ ok 54, $cursor->c_get($k, $v, DB_GET_BOTH) == DB_NOTFOUND ;
+
+}
+
+{
+ # Tied Hash interface
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 55, tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # check "each" with an empty database
+ my $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ ++ $count ;
+ }
+ ok 56, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 57, $count == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $hash{"some key"} = "some value";
+ ok 58, (tied %hash)->status() == 0 ;
+ ok 59, $hash{"some key"} eq "some value";
+ ok 60, defined $hash{"some key"} ;
+ ok 61, (tied %hash)->status() == 0 ;
+ ok 62, exists $hash{"some key"} ;
+ ok 63, !defined $hash{"jimmy"} ;
+ ok 64, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 65, !exists $hash{"jimmy"} ;
+ ok 66, (tied %hash)->status() == DB_NOTFOUND ;
+
+ delete $hash{"some key"} ;
+ ok 67, (tied %hash)->status() == 0 ;
+ ok 68, ! defined $hash{"some key"} ;
+ ok 69, (tied %hash)->status() == DB_NOTFOUND ;
+ ok 70, ! exists $hash{"some key"} ;
+ ok 71, (tied %hash)->status() == DB_NOTFOUND ;
+
+ $hash{1} = 2 ;
+ $hash{10} = 20 ;
+ $hash{1000} = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ while (my ($k, $v) = each %hash) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 72, $count == 3 ;
+ ok 73, $keys == 1011 ;
+ ok 74, $values == 2022 ;
+
+ # now clear the hash
+ %hash = () ;
+ ok 75, keys %hash == 0 ;
+
+ untie %hash ;
+}
+
+{
+ # in-memory file
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $fd ;
+ my $value ;
+ ok 76, my $db = tie %hash, 'BerkeleyDB::Hash' ;
+
+ ok 77, $db->db_put("some key", "some value") == 0 ;
+ ok 78, $db->db_get("some key", $value) == 0 ;
+ ok 79, $value eq "some value" ;
+
+ undef $db ;
+ untie %hash ;
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 80, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db->db_put($k, $v) ;
+ }
+ ok 81, $ret == 0 ;
+
+
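+    # partial_set(offset, length) makes subsequent db_get/db_put calls
+    # operate on just that byte range of the value; it returns the
+    # previous partial settings.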
+ # do a partial get
+ my($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 82, $pon == 0 && $off == 0 && $len == 0 ;
+ ok 83, ( $db->db_get("red", $value) == 0) && $value eq "bo" ;
+ ok 84, ( $db->db_get("green", $value) == 0) && $value eq "ho" ;
+ ok 85, ( $db->db_get("blue", $value) == 0) && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 86, $pon ;
+ ok 87, $off == 0 ;
+ ok 88, $len == 2 ;
+ ok 89, $db->db_get("red", $value) == 0 && $value eq "t" ;
+ ok 90, $db->db_get("green", $value) == 0 && $value eq "se" ;
+ ok 91, $db->db_get("blue", $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 92, $pon ;
+ ok 93, $off == 3 ;
+ ok 94, $len == 2 ;
+ ok 95, $db->db_get("red", $value) == 0 && $value eq "boat" ;
+ ok 96, $db->db_get("green", $value) == 0 && $value eq "house" ;
+ ok 97, $db->db_get("blue", $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 98, ! $pon ;
+ ok 99, $off == 0 ;
+ ok 100, $len == 0 ;
+ ok 101, $db->db_put("red", "") == 0 ;
+ ok 102, $db->db_put("green", "AB") == 0 ;
+ ok 103, $db->db_put("blue", "XYZ") == 0 ;
+ ok 104, $db->db_put("new", "KLM") == 0 ;
+
+ $db->partial_clear() ;
+ ok 105, $db->db_get("red", $value) == 0 && $value eq "at" ;
+ ok 106, $db->db_get("green", $value) == 0 && $value eq "ABuse" ;
+ ok 107, $db->db_get("blue", $value) == 0 && $value eq "XYZa" ;
+ ok 108, $db->db_get("new", $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 109, $db->db_put("red", "PPP") == 0 ;
+ ok 110, $db->db_put("green", "Q") == 0 ;
+ ok 111, $db->db_put("blue", "XYZ") == 0 ;
+ ok 112, $db->db_put("new", "--") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get("red", $value) == 0 && $value eq "at\0PPP" ;
+ ok 117, $db->db_get("green", $value) == 0 && $value eq "ABuQ" ;
+ ok 118, $db->db_get("blue", $value) == 0 && $value eq "XYZXYZ" ;
+ ok 119, $db->db_get("new", $value) == 0 && $value eq "KLM--" ;
+}
+
+{
+ # partial
+ # check works via tied hash
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+ ok 120, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ while (my ($k, $v) = each %data) {
+ $hash{$k} = $v ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 121, $hash{"red"} eq "bo" ;
+ ok 122, $hash{"green"} eq "ho" ;
+ ok 123, $hash{"blue"} eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 124, $hash{"red"} eq "t" ;
+ ok 125, $hash{"green"} eq "se" ;
+ ok 126, $hash{"blue"} eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 127, $hash{"red"} eq "boat" ;
+ ok 128, $hash{"green"} eq "house" ;
+ ok 129, $hash{"blue"} eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 130, $hash{"red"} = "" ;
+ ok 131, $hash{"green"} = "AB" ;
+ ok 132, $hash{"blue"} = "XYZ" ;
+ ok 133, $hash{"new"} = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 134, $hash{"red"} eq "at" ;
+ ok 135, $hash{"green"} eq "ABuse" ;
+ ok 136, $hash{"blue"} eq "XYZa" ;
+ ok 137, $hash{"new"} eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 138, $hash{"red"} = "PPP" ;
+ ok 139, $hash{"green"} = "Q" ;
+ ok 140, $hash{"blue"} = "XYZ" ;
+ ok 141, $hash{"new"} = "TU" ;
+
+ $db->partial_clear() ;
+ ok 142, $hash{"red"} eq "at\0PPP" ;
+ ok 143, $hash{"green"} eq "ABuQ" ;
+ ok 144, $hash{"blue"} eq "XYZXYZ" ;
+ ok 145, $hash{"new"} eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 146, my $lexD = new LexDir($home);
+ ok 147, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 148, my $txn = $env->txn_begin() ;
+ ok 149, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 150, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 151, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 152, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 153, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 154, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 155, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+
+{
+ # DB_DUP
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 156, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hash{'Wall'} = 'Larry' ;
+ $hash{'Wall'} = 'Stone' ;
+ $hash{'Smith'} = 'John' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'Wall'} = 'Brick' ;
+ $hash{'mouse'} = 'mickey' ;
+
+ ok 157, keys %hash == 6 ;
+
+ # create a cursor
+ ok 158, my $cursor = $db->db_cursor() ;
+
+ my $key = "Wall" ;
+ my $value ;
+ ok 159, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 160, $key eq "Wall" && $value eq "Larry" ;
+ ok 161, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 162, $key eq "Wall" && $value eq "Stone" ;
+ ok 163, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 164, $key eq "Wall" && $value eq "Brick" ;
+ ok 165, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 166, $key eq "Wall" && $value eq "Brick" ;
+
+ #my $ref = $db->db_stat() ;
+ #ok 143, $ref->{bt_flags} | DB_DUP ;
+
+ # test DB_DUP_NEXT
+ my ($k, $v) = ("Wall", "") ;
+ ok 167, $cursor->c_get($k, $v, DB_SET) == 0 ;
+ ok 168, $k eq "Wall" && $v eq "Larry" ;
+ ok 169, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 170, $k eq "Wall" && $v eq "Stone" ;
+ ok 171, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 172, $k eq "Wall" && $v eq "Brick" ;
+ ok 173, $cursor->c_get($k, $v, DB_NEXT_DUP) == 0 ;
+ ok 174, $k eq "Wall" && $v eq "Brick" ;
+ ok 175, $cursor->c_get($k, $v, DB_NEXT_DUP) == DB_NOTFOUND ;
+
+
+ undef $db ;
+ undef $cursor ;
+ untie %hash ;
+
+}
+
+{
+ # DB_DUP & DupCompare
+ my $lex = new LexFile $Dfile, $Dfile2;
+ my ($key, $value) ;
+ my (%h, %g) ;
+ my @Keys = qw( 0123 9 12 -1234 9 987654321 9 def ) ;
+ my @Values = qw( 1 11 3 dd x abc 2 0 ) ;
+
+ ok 176, tie %h, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ ok 177, tie %g, 'BerkeleyDB::Hash', -Filename => $Dfile2,
+ -DupCompare => sub { $_[0] <=> $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Flags => DB_CREATE ;
+
+ foreach (@Keys) {
+ local $^W = 0 ;
+ my $value = shift @Values ;
+ $h{$_} = $value ;
+ $g{$_} = $value ;
+ }
+
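+    # -DupCompare keeps the duplicates for a key in sorted order: %h sorts
+    # the values stored under key 9 lexically ("11", "2", "x"), while %g
+    # sorts them numerically ("x", 2, 11).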
+ ok 178, my $cursor = (tied %h)->db_cursor() ;
+ $key = 9 ; $value = "";
+ ok 179, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 180, $key == 9 && $value eq 11 ;
+ ok 181, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 182, $key == 9 && $value == 2 ;
+ ok 183, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 184, $key == 9 && $value eq "x" ;
+
+ $cursor = (tied %g)->db_cursor() ;
+ $key = 9 ;
+ ok 185, $cursor->c_get($key, $value, DB_SET) == 0 ;
+ ok 186, $key == 9 && $value eq "x" ;
+ ok 187, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 188, $key == 9 && $value == 2 ;
+ ok 189, $cursor->c_get($key, $value, DB_NEXT) == 0 ;
+ ok 190, $key == 9 && $value == 11 ;
+
+
+}
+
+{
+ # get_dup etc
+ my $lex = new LexFile $Dfile;
+ my %hh ;
+
+ ok 191, my $YY = tie %hh, "BerkeleyDB::Hash", -Filename => $Dfile,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP,
+ -Flags => DB_CREATE ;
+
+ $hh{'Wall'} = 'Larry' ;
+ $hh{'Wall'} = 'Stone' ; # Note the duplicate key
+ $hh{'Wall'} = 'Brick' ; # Note the duplicate key
+ $hh{'Smith'} = 'John' ;
+ $hh{'mouse'} = 'mickey' ;
+
+ # first work in scalar context
+ ok 192, scalar $YY->get_dup('Unknown') == 0 ;
+ ok 193, scalar $YY->get_dup('Smith') == 1 ;
+ ok 194, scalar $YY->get_dup('Wall') == 3 ;
+
+ # now in list context
+ my @unknown = $YY->get_dup('Unknown') ;
+ ok 195, "@unknown" eq "" ;
+
+ my @smith = $YY->get_dup('Smith') ;
+ ok 196, "@smith" eq "John" ;
+
+ {
+ my @wall = $YY->get_dup('Wall') ;
+ my %wall ;
+ @wall{@wall} = @wall ;
+ ok 197, (@wall == 3 && $wall{'Larry'}
+ && $wall{'Stone'} && $wall{'Brick'});
+ }
+
+ # hash
+ my %unknown = $YY->get_dup('Unknown', 1) ;
+ ok 198, keys %unknown == 0 ;
+
+ my %smith = $YY->get_dup('Smith', 1) ;
+ ok 199, keys %smith == 1 && $smith{'John'} ;
+
+ my %wall = $YY->get_dup('Wall', 1) ;
+ ok 200, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 1 ;
+
+ undef $YY ;
+ untie %hh ;
+
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
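+    # Write a BerkeleyDB::Hash subclass to disk: its db_put stores
+    # value * 3 and its db_get subtracts 2 on the way back, so a value
+    # stored as 3 via the tie reads back as 7.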
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Hash);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 201, $@ eq "" ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB", -Filename => "dbhash.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 202, $@ eq "" ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok 203, $@ eq "" ;
+ main::ok 204, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put("joe", 4) ; $X->db_get("joe", $value) ; return $value' ;
+ main::ok 205, $@ eq "" ;
+ main::ok 206, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 207, $@ eq "" ;
+ main::ok 208, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok 209, $@ eq "" ;
+ main::ok 210, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
diff --git a/db/perl/BerkeleyDB/t/join.t b/db/perl/BerkeleyDB/t/join.t
new file mode 100644
index 000000000..d63726c79
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/join.t
@@ -0,0 +1,225 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+if ($BerkeleyDB::db_ver < 2.005002)
+{
+ print "1..0 # Skip: join needs Berkeley DB 2.5.2 or later\n" ;
+ exit 0 ;
+}
+
+
+print "1..37\n";
+
+my $Dfile1 = "dbhash1.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+umask(0) ;
+
+{
+ # error cases
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my $value ;
+ my $status ;
+ my $cursor ;
+
+ ok 1, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] lt $_[1] },
+ -Property => DB_DUP|DB_DUPSORT ;
+
+ # no cursors supplied
+ eval '$cursor = $db1->db_join() ;' ;
+ ok 2, $@ =~ /Usage: \$db->BerkeleyDB::Common::db_join\Q([cursors], flags=0)/;
+
+ # empty list
+ eval '$cursor = $db1->db_join([]) ;' ;
+ ok 3, $@ =~ /db_join: No cursors in parameter list/;
+
+    # cursor list isn't an array reference
+ eval '$cursor = $db1->db_join({}) ;' ;
+ ok 4, $@ =~ /cursors is not an array reference at/ ;
+
+ eval '$cursor = $db1->db_join(\1) ;' ;
+ ok 5, $@ =~ /cursors is not an array reference at/ ;
+
+}
+
+{
+ # test a 2-way & 3-way join
+
+ my $lex = new LexFile $Dfile1, $Dfile2, $Dfile3 ;
+ my %hash1 ;
+ my %hash2 ;
+ my %hash3 ;
+ my $value ;
+ my $status ;
+
+ my $home = "./fred" ;
+ ok 6, my $lexD = new LexDir($home);
+ ok 7, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN
+ |DB_INIT_MPOOL;
+ #|DB_INIT_MPOOL| DB_INIT_LOCK;
+ ok 8, my $txn = $env->txn_begin() ;
+ ok 9, my $db1 = tie %hash1, 'BerkeleyDB::Hash',
+ -Filename => $Dfile1,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+ ;
+
+ ok 10, my $db2 = tie %hash2, 'BerkeleyDB::Hash',
+ -Filename => $Dfile2,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 11, my $db3 = tie %hash3, 'BerkeleyDB::Btree',
+ -Filename => $Dfile3,
+ -Flags => DB_CREATE,
+ -DupCompare => sub { $_[0] cmp $_[1] },
+ -Property => DB_DUP|DB_DUPSORT,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ ok 12, addData($db1, qw( apple Convenience
+ peach Shopway
+ pear Farmer
+ raspberry Shopway
+ strawberry Shopway
+ gooseberry Farmer
+ blueberry Farmer
+ ));
+
+ ok 13, addData($db2, qw( red apple
+ red raspberry
+ red strawberry
+ yellow peach
+ yellow pear
+ green gooseberry
+ blue blueberry)) ;
+
+ ok 14, addData($db3, qw( expensive apple
+ reasonable raspberry
+ expensive strawberry
+ reasonable peach
+ reasonable pear
+ expensive gooseberry
+ reasonable blueberry)) ;
+
+ ok 15, my $cursor2 = $db2->db_cursor() ;
+ my $k = "red" ;
+ my $v = "" ;
+ ok 16, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ # Two way Join
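+    # db_join returns a cursor over $db1 (fruit -> shop) restricted to the
+    # keys that appear as duplicate data items under "red" in $db2,
+    # i.e. the red fruits.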
+ ok 17, my $cursor1 = $db1->db_join([$cursor2]) ;
+
+ my %expected = qw( apple Convenience
+ raspberry Shopway
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 18, keys %expected == 0 ;
+ ok 19, $cursor1->status() == DB_NOTFOUND ;
+
+ # Three way Join
+ ok 20, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 21, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 22, my $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 23, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 24, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple Convenience
+ strawberry Shopway
+ ) ;
+
+ # sequence forwards
+ while ($cursor1->c_get($k, $v) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} && $expected{$k} eq $v ;
+ #print "[$k] [$v]\n" ;
+ }
+ ok 25, keys %expected == 0 ;
+ ok 26, $cursor1->status() == DB_NOTFOUND ;
+
+ # test DB_JOIN_ITEM
+ # #################
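+    # With DB_JOIN_ITEM the join cursor returns only the matching keys;
+    # the data is not fetched from the primary database.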
+ ok 27, $cursor2 = $db2->db_cursor() ;
+ $k = "red" ;
+ $v = "" ;
+ ok 28, $cursor2->c_get($k, $v, DB_SET) == 0 ;
+
+ ok 29, $cursor3 = $db3->db_cursor() ;
+ $k = "expensive" ;
+ $v = "" ;
+ ok 30, $cursor3->c_get($k, $v, DB_SET) == 0 ;
+ ok 31, $cursor1 = $db1->db_join([$cursor2, $cursor3]) ;
+
+ %expected = qw( apple 1
+ strawberry 1
+ ) ;
+
+ # sequence forwards
+ $k = "" ;
+ $v = "" ;
+ while ($cursor1->c_get($k, $v, DB_JOIN_ITEM) == 0) {
+ delete $expected{$k}
+ if defined $expected{$k} ;
+ #print "[$k]\n" ;
+ }
+ ok 32, keys %expected == 0 ;
+ ok 33, $cursor1->status() == DB_NOTFOUND ;
+
+ ok 34, $cursor1->c_close() == 0 ;
+ ok 35, $cursor2->c_close() == 0 ;
+ ok 36, $cursor3->c_close() == 0 ;
+
+ ok 37, ($status = $txn->txn_commit) == 0;
+
+ undef $txn ;
+ #undef $cursor1;
+ #undef $cursor2;
+ #undef $cursor3;
+ undef $db1 ;
+ undef $db2 ;
+ undef $db3 ;
+ undef $env ;
+ untie %hash1 ;
+ untie %hash2 ;
+ untie %hash3 ;
+}
+
diff --git a/db/perl/BerkeleyDB/t/mldbm.t b/db/perl/BerkeleyDB/t/mldbm.t
new file mode 100644
index 000000000..fe195fe46
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/mldbm.t
@@ -0,0 +1,159 @@
+#!/usr/bin/perl -w
+
+BEGIN
+{
+ if ($] < 5.005) {
+ print "1..0 # This is Perl $], skipping test\n" ;
+ exit 0 ;
+ }
+
+ eval { require Data::Dumper ; };
+ if ($@) {
+ print "1..0 # Data::Dumper is not installed on this system.\n";
+ exit 0 ;
+ }
+ if ($Data::Dumper::VERSION < 2.08) {
+ print "1..0 # Data::Dumper 2.08 or better required (found $Data::Dumper::VERSION).\n";
+ exit 0 ;
+ }
+ eval { require MLDBM ; };
+ if ($@) {
+ print "1..0 # MLDBM is not installed on this system.\n";
+ exit 0 ;
+ }
+}
+
+use t::util ;
+
+print "1..12\n";
+
+{
+package BTREE ;
+
+use BerkeleyDB ;
+use MLDBM qw(BerkeleyDB::Btree) ;
+use Data::Dumper;
+
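+# MLDBM layers Data::Dumper serialisation on top of BerkeleyDB::Btree, so
+# the nested, self-referential structures below can be stored and fetched
+# through the tied hash %o.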
+$filename = 'testmldbm' ;
+
+unlink $filename ;
+$MLDBM::UseDB = "BerkeleyDB::Btree" ;
+$db = tie %o, MLDBM, -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+::ok 1, $db ;
+::ok 2, $db->type() == DB_BTREE ;
+
+$c = [\'c'];
+$b = {};
+$a = [1, $b, $c];
+$b->{a} = $a;
+$b->{b} = $a->[1];
+$b->{c} = $a->[2];
+@o{qw(a b c)} = ($a, $b, $c);
+$o{d} = "{once upon a time}";
+$o{e} = 1024;
+$o{f} = 1024.1024;
+$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+$second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+::ok 3, $first eq $second ;
+::ok 4, $o{d} eq "{once upon a time}" ;
+::ok 5, $o{e} == 1024 ;
+::ok 6, $o{f} eq 1024.1024 ;
+
+unlink $filename ;
+}
+
+{
+
+package HASH ;
+
+use BerkeleyDB ;
+use MLDBM qw(BerkeleyDB::Hash) ;
+use Data::Dumper;
+
+$filename = 'testmldbm' ;
+
+unlink $filename ;
+$MLDBM::UseDB = "BerkeleyDB::Hash" ;
+$db = tie %o, MLDBM, -Filename => $filename,
+ -Flags => DB_CREATE
+ or die $!;
+::ok 7, $db ;
+::ok 8, $db->type() == DB_HASH ;
+
+
+$c = [\'c'];
+$b = {};
+$a = [1, $b, $c];
+$b->{a} = $a;
+$b->{b} = $a->[1];
+$b->{c} = $a->[2];
+@o{qw(a b c)} = ($a, $b, $c);
+$o{d} = "{once upon a time}";
+$o{e} = 1024;
+$o{f} = 1024.1024;
+$first = Data::Dumper->new([@o{qw(a b c)}], [qw(a b c)])->Quotekeys(0)->Dump;
+$second = <<'EOT';
+$a = [
+ 1,
+ {
+ a => $a,
+ b => $a->[1],
+ c => [
+ \'c'
+ ]
+ },
+ $a->[1]{c}
+ ];
+$b = {
+ a => [
+ 1,
+ $b,
+ [
+ \'c'
+ ]
+ ],
+ b => $b,
+ c => $b->{a}[2]
+ };
+$c = [
+ \'c'
+ ];
+EOT
+
+::ok 9, $first eq $second ;
+::ok 10, $o{d} eq "{once upon a time}" ;
+::ok 11, $o{e} == 1024 ;
+::ok 12, $o{f} eq 1024.1024 ;
+
+unlink $filename ;
+
+}
diff --git a/db/perl/BerkeleyDB/t/queue.t b/db/perl/BerkeleyDB/t/queue.t
new file mode 100644
index 000000000..3a0d107d8
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/queue.t
@@ -0,0 +1,746 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3.3) {
+ print "1..0 # Skipping test, Queue needs Berkeley DB 3.3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..197\n";
+
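+# Queue databases hold fixed-length records; fillout pads a value to the
+# record length with the pad character so it can be compared with what
+# db_get returns.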
+sub fillout
+{
+ my $var = shift ;
+ my $length = shift ;
+ my $pad = shift || " " ;
+ my $template = $pad x $length ;
+ substr($template, 0, length($var)) = $var ;
+ return $template ;
+}
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Queue -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Queue -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Queue -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Queue -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Queue
+
+{
+ my $lex = new LexFile $Dfile ;
+ my $rec_len = 10 ;
+ my $pad = "x" ;
+
+ ok 6, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Len => $rec_len,
+ -Pad => $pad;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq fillout("some value", $rec_len, $pad) ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq fillout("value", $rec_len, $pad) ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq fillout("value", $rec_len, $pad) ;
+
+
+}
+
+
+{
+    # Check simple env works with an array.
+ # and pad defaults to space
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ my $rec_len = 11 ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+ ok 29, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE,
+ -Len => $rec_len;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq fillout("some value", $rec_len) ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 5 ;
+ ok 33, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( fillout($copy{$k}, $rec_len) eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ # full tied array support started in Perl 5.004_57
+ # just double check.
+ my $FA = 0 ;
+ {
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+ }
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ my $rec_len = 10 ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq fillout("some value", $rec_len);
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_KEYEMPTY ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
+ # unshift isn't allowed
+# eval {
+# $FA ? unshift @array, "red", "green", "blue"
+# : $db->unshift("red", "green", "blue" ) ;
+# } ;
+# ok 64, $@ =~ /^unshift is unsupported with Queue databases/ ;
+ $array[0] = "red" ;
+ $array[1] = "green" ;
+ $array[2] = "blue" ;
+ $array[4] = 2 ;
+ ok 64, $array[0] eq fillout("red", $rec_len) ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 0 ;
+ ok 67, $v eq fillout("red", $rec_len) ;
+ ok 68, $array[1] eq fillout("green", $rec_len) ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 1 ;
+ ok 71, $v eq fillout("green", $rec_len) ;
+ ok 72, $array[2] eq fillout("blue", $rec_len) ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 2 ;
+ ok 75, $v eq fillout("blue", $rec_len) ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq fillout("red", $rec_len) ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq fillout("green", $rec_len) ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq fillout("blue", $rec_len) ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1002 ;
+ ok 86, $v eq fillout("end", $rec_len) ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1001 ;
+ ok 89, $v eq fillout("the", $rec_len) ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 1000 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq fillout("end", $rec_len) ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq fillout("the", $rec_len) ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ my $rec_len = 15 ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Len => $rec_len;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq fillout("some value", $rec_len) ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 101, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ my $r = $db->db_put($i, $data[$i]) ;
+ $ret += $r ;
+ }
+ ok 102, $ret == 0 ;
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq fillout("t", 2) ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq " " ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq fillout("house", $rec_len) ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
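+    # A partial put on a fixed-length Queue record must supply exactly the
+    # selected number of bytes, so the empty and 3-byte writes below fail.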
+ ok 119, $db->db_put(1, "") != 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XY") == 0 ;
+ ok 122, $db->db_put(4, "KLM") != 0 ;
+ ok 123, $db->db_put(4, "KL") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 124, $pon ;
+ ok 125, $off == 0 ;
+ ok 126, $len == 2 ;
+ ok 127, $db->db_get(1, $value) == 0 && $value eq fillout("boat", $rec_len) ;
+ ok 128, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse", $rec_len) ;
+ ok 129, $db->db_get(3, $value) == 0 && $value eq fillout("XYa", $rec_len) ;
+ ok 130, $db->db_get(4, $value) == 0 && $value eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 131, ! $pon ;
+ ok 132, $off == 0 ;
+ ok 133, $len == 0 ;
+ ok 134, $db->db_put(1, "PP") == 0 ;
+ ok 135, $db->db_put(2, "Q") != 0 ;
+ ok 136, $db->db_put(3, "XY") == 0 ;
+ ok 137, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 138, $db->db_get(1, $value) == 0 && $value eq fillout("boaPP", $rec_len) ;
+ ok 139, $db->db_get(2, $value) == 0 && $value eq fillout("ABuse",$rec_len) ;
+ ok 140, $db->db_get(3, $value) == 0 && $value eq fillout("XYaXY", $rec_len) ;
+ ok 141, $db->db_get(4, $value) == 0 && $value eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ my $rec_len = 8 ;
+ ok 142, my $db = tie @array, 'BerkeleyDB::Queue', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ my $status = 0 ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ $status += $db->status() ;
+ }
+
+ ok 143, $status == 0 ;
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 144, $array[1] eq fillout("bo", 2) ;
+ ok 145, $array[2] eq fillout("ho", 2) ;
+ ok 146, $array[3] eq fillout("se", 2) ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 147, $array[1] eq fillout("t", 2) ;
+ ok 148, $array[2] eq fillout("se", 2) ;
+ ok 149, $array[3] eq fillout("", 2) ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 150, $array[1] eq fillout("boat", $rec_len) ;
+ ok 151, $array[2] eq fillout("house", $rec_len) ;
+ ok 152, $array[3] eq fillout("sea", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ $array[1] = "" ;
+ ok 153, $db->status() != 0 ;
+ $array[2] = "AB" ;
+ ok 154, $db->status() == 0 ;
+ $array[3] = "XY" ;
+ ok 155, $db->status() == 0 ;
+ $array[4] = "KL" ;
+ ok 156, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 157, $array[1] eq fillout("boat", $rec_len) ;
+ ok 158, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 159, $array[3] eq fillout("XYa", $rec_len) ;
+ ok 160, $array[4] eq fillout("KL", $rec_len) ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ $array[1] = "PP" ;
+ ok 161, $db->status() == 0 ;
+ $array[2] = "Q" ;
+ ok 162, $db->status() != 0 ;
+ $array[3] = "XY" ;
+ ok 163, $db->status() == 0 ;
+ $array[4] = "TU" ;
+ ok 164, $db->status() == 0 ;
+
+ $db->partial_clear() ;
+ ok 165, $array[1] eq fillout("boaPP", $rec_len) ;
+ ok 166, $array[2] eq fillout("ABuse", $rec_len) ;
+ ok 167, $array[3] eq fillout("XYaXY", $rec_len) ;
+ ok 168, $array[4] eq fillout("KL TU", $rec_len) ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 169, my $lexD = new LexDir($home);
+ my $rec_len = 9 ;
+ ok 170, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 171, my $txn = $env->txn_begin() ;
+ ok 172, my $db1 = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 173, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 174, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 175, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 176, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 177, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 178, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
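+    # The name of the record-count field in the queue stat hash depends on
+    # the Berkeley DB version, so pick whichever this release provides.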
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "qs_ndata" : "qs_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ my $rec_len = 7 ;
+ ok 179, my $db = new BerkeleyDB::Queue -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ -Len => $rec_len,
+ -Pad => " "
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 180, $ref->{$recs} == 0;
+ ok 181, $ref->{'qs_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 182, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 183, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Queue);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 184, $@ eq "" ;
+ my @h ;
+ my $X ;
+ my $rec_len = 34 ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 ,
+ -Len => $rec_len,
+ -Pad => " "
+ );
+ ' ;
+
+ main::ok 185, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 192, $@ eq "" ;
+ main::ok 193, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ my $rec_len = 21 ;
+ ok 194, my $db = tie @array, 'BerkeleyDB::Queue',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Len => $rec_len,
+ -Pad => " " ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
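+    # DB_APPEND allocates the next record number (one past the highest in
+    # use) and writes it back into $k.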
+ my $k = 0 ;
+ ok 195, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 196, $k == 4 ;
+ ok 197, $array[4] eq fillout("fred", $rec_len) ;
+
+ undef $db ;
+ untie @array ;
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
diff --git a/db/perl/BerkeleyDB/t/recno.t b/db/perl/BerkeleyDB/t/recno.t
new file mode 100644
index 000000000..4da7ae2e0
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/recno.t
@@ -0,0 +1,877 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..218\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Recno -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) / ;
+
+ eval ' $db = new BerkeleyDB::Recno -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Recno -Txn => "x" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Recno -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# Now check the interface to Recno
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ my $status ;
+ ok 7, $db->db_put(1, "some value") == 0 ;
+ ok 8, $db->status() == 0 ;
+ ok 9, $db->db_get(1, $value) == 0 ;
+ ok 10, $value eq "some value" ;
+ ok 11, $db->db_put(2, "value") == 0 ;
+ ok 12, $db->db_get(2, $value) == 0 ;
+ ok 13, $value eq "value" ;
+ ok 14, $db->db_del(1) == 0 ;
+ ok 15, ($status = $db->db_get(1, $value)) == DB_KEYEMPTY ;
+ ok 16, $db->status() == DB_KEYEMPTY ;
+ ok 17, $db->status() eq $DB_errors{'DB_KEYEMPTY'} ;
+
+ ok 18, ($status = $db->db_get(7, $value)) == DB_NOTFOUND ;
+ ok 19, $db->status() == DB_NOTFOUND ;
+ ok 20, $db->status() eq $DB_errors{'DB_NOTFOUND'} ;
+
+ ok 21, $db->db_sync() == 0 ;
+
+ # Check NOOVERWRITE will make put fail when attempting to overwrite
+ # an existing record.
+
+ ok 22, $db->db_put( 2, 'x', DB_NOOVERWRITE) == DB_KEYEXIST ;
+ ok 23, $db->status() eq $DB_errors{'DB_KEYEXIST'} ;
+ ok 24, $db->status() == DB_KEYEXIST ;
+
+
+ # check that the value of the key has not been changed by the
+ # previous test
+ ok 25, $db->db_get(2, $value) == 0 ;
+ ok 26, $value eq "value" ;
+
+
+}
+
+
+{
+    # Check simple env works with an array.
+ my $lex = new LexFile $Dfile ;
+
+ my $home = "./fred" ;
+ ok 27, my $lexD = new LexDir($home);
+
+ ok 28, my $env = new BerkeleyDB::Env -Flags => DB_CREATE|DB_INIT_MPOOL,
+ -Home => $home ;
+
+ ok 29, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Env => $env,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my $value ;
+ ok 30, $db->db_put(1, "some value") == 0 ;
+ ok 31, $db->db_get(1, $value) == 0 ;
+ ok 32, $value eq "some value" ;
+ undef $db ;
+ undef $env ;
+}
+
+
+{
+ # cursors
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my ($k, $v) ;
+ ok 33, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "red" ,
+ "green" ,
+ "blue" ,
+ ) ;
+
+ my $i ;
+ my %data ;
+ my $ret = 0 ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ $data{$i} = $data[$i] ;
+ }
+ ok 34, $ret == 0 ;
+
+ # create the cursor
+ ok 35, my $cursor = $db->db_cursor() ;
+
+ $k = 0 ; $v = "" ;
+ my %copy = %data;
+ my $extras = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+
+ ok 36, $cursor->status() == DB_NOTFOUND ;
+ ok 37, $cursor->status() eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 38, keys %copy == 0 ;
+ ok 39, $extras == 0 ;
+
+ # sequence backwards
+ %copy = %data ;
+ $extras = 0 ;
+ my $status ;
+ for ( $status = $cursor->c_get($k, $v, DB_LAST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_PREV)) {
+ if ( $copy{$k} eq $v )
+ { delete $copy{$k} }
+ else
+ { ++ $extras }
+ }
+ ok 40, $status == DB_NOTFOUND ;
+ ok 41, $status eq $DB_errors{'DB_NOTFOUND'} ;
+ ok 42, $cursor->status() == $status ;
+ ok 43, $cursor->status() eq $status ;
+ ok 44, keys %copy == 0 ;
+ ok 45, $extras == 0 ;
+}
+
+{
+ # Tied Array interface
+
+ # full tied array support started in Perl 5.004_57
+ # just double check.
+ my $FA = 0 ;
+ {
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+ }
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $db ;
+ ok 46, $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+
+ ok 47, my $cursor = (tied @array)->db_cursor() ;
+ # check the database is empty
+ my $count = 0 ;
+ my ($k, $v) = (0,"") ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 48, $cursor->status() == DB_NOTFOUND ;
+ ok 49, $count == 0 ;
+
+ ok 50, @array == 0 ;
+
+ # Add a k/v pair
+ my $value ;
+ $array[1] = "some value";
+ ok 51, (tied @array)->status() == 0 ;
+ ok 52, $array[1] eq "some value";
+ ok 53, defined $array[1];
+ ok 54, (tied @array)->status() == 0 ;
+ ok 55, !defined $array[3];
+ ok 56, (tied @array)->status() == DB_NOTFOUND ;
+
+ ok 57, (tied @array)->db_del(1) == 0 ;
+ ok 58, (tied @array)->status() == 0 ;
+ ok 59, ! defined $array[1];
+ ok 60, (tied @array)->status() == DB_NOTFOUND ;
+
+ $array[1] = 2 ;
+ $array[10] = 20 ;
+ $array[1000] = 2000 ;
+
+ my ($keys, $values) = (0,0);
+ $count = 0 ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ $keys += $k ;
+ $values += $v ;
+ ++ $count ;
+ }
+ ok 61, $count == 3 ;
+ ok 62, $keys == 1011 ;
+ ok 63, $values == 2022 ;
+
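+    # DB_RENUMBER renumbers the records as entries are inserted or removed,
+    # so unshift/shift/push/pop behave like a normal Perl array.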
+ # unshift
+ $FA ? unshift @array, "red", "green", "blue"
+ : $db->unshift("red", "green", "blue" ) ;
+ ok 64, $array[1] eq "red" ;
+ ok 65, $cursor->c_get($k, $v, DB_FIRST) == 0 ;
+ ok 66, $k == 1 ;
+ ok 67, $v eq "red" ;
+ ok 68, $array[2] eq "green" ;
+ ok 69, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 70, $k == 2 ;
+ ok 71, $v eq "green" ;
+ ok 72, $array[3] eq "blue" ;
+ ok 73, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 74, $k == 3 ;
+ ok 75, $v eq "blue" ;
+ ok 76, $array[4] == 2 ;
+ ok 77, $cursor->c_get($k, $v, DB_NEXT) == 0 ;
+ ok 78, $k == 4 ;
+ ok 79, $v == 2 ;
+
+ # shift
+ ok 80, ($FA ? shift @array : $db->shift()) eq "red" ;
+ ok 81, ($FA ? shift @array : $db->shift()) eq "green" ;
+ ok 82, ($FA ? shift @array : $db->shift()) eq "blue" ;
+ ok 83, ($FA ? shift @array : $db->shift()) == 2 ;
+
+ # push
+ $FA ? push @array, "the", "end"
+ : $db->push("the", "end") ;
+ ok 84, $cursor->c_get($k, $v, DB_LAST) == 0 ;
+ ok 85, $k == 1001 ;
+ ok 86, $v eq "end" ;
+ ok 87, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 88, $k == 1000 ;
+ ok 89, $v eq "the" ;
+ ok 90, $cursor->c_get($k, $v, DB_PREV) == 0 ;
+ ok 91, $k == 999 ;
+ ok 92, $v == 2000 ;
+
+ # pop
+ ok 93, ( $FA ? pop @array : $db->pop ) eq "end" ;
+ ok 94, ( $FA ? pop @array : $db->pop ) eq "the" ;
+ ok 95, ( $FA ? pop @array : $db->pop ) == 2000 ;
+
+ # now clear the array
+ $FA ? @array = ()
+ : $db->clear() ;
+ ok 96, $cursor->c_get($k, $v, DB_FIRST) == DB_NOTFOUND ;
+
+ undef $cursor ;
+ undef $db ;
+ untie @array ;
+}
+
+{
+ # in-memory file
+
+ my @array ;
+ my $fd ;
+ my $value ;
+ ok 97, my $db = tie @array, 'BerkeleyDB::Recno' ;
+
+ ok 98, $db->db_put(1, "some value") == 0 ;
+ ok 99, $db->db_get(1, $value) == 0 ;
+ ok 100, $value eq "some value" ;
+
+}
+
+{
+ # partial
+ # check works via API
+
+ my $lex = new LexFile $Dfile ;
+ my $value ;
+    ok 101, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $ret += $db->db_put($i, $data[$i]) ;
+ }
+ ok 102, $ret == 0 ;
+
+
+ # do a partial get
+ my ($pon, $off, $len) = $db->partial_set(0,2) ;
+ ok 103, ! $pon && $off == 0 && $len == 0 ;
+ ok 104, $db->db_get(1, $value) == 0 && $value eq "bo" ;
+ ok 105, $db->db_get(2, $value) == 0 && $value eq "ho" ;
+ ok 106, $db->db_get(3, $value) == 0 && $value eq "se" ;
+
+ # do a partial get, off end of data
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 107, $pon ;
+ ok 108, $off == 0 ;
+ ok 109, $len == 2 ;
+ ok 110, $db->db_get(1, $value) == 0 && $value eq "t" ;
+ ok 111, $db->db_get(2, $value) == 0 && $value eq "se" ;
+ ok 112, $db->db_get(3, $value) == 0 && $value eq "" ;
+
+    # switch off partial mode
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 113, $pon ;
+ ok 114, $off == 3 ;
+ ok 115, $len == 2 ;
+ ok 116, $db->db_get(1, $value) == 0 && $value eq "boat" ;
+ ok 117, $db->db_get(2, $value) == 0 && $value eq "house" ;
+ ok 118, $db->db_get(3, $value) == 0 && $value eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 119, $db->db_put(1, "") == 0 ;
+ ok 120, $db->db_put(2, "AB") == 0 ;
+ ok 121, $db->db_put(3, "XYZ") == 0 ;
+ ok 122, $db->db_put(4, "KLM") == 0 ;
+
+ ($pon, $off, $len) = $db->partial_clear() ;
+ ok 123, $pon ;
+ ok 124, $off == 0 ;
+ ok 125, $len == 2 ;
+ ok 126, $db->db_get(1, $value) == 0 && $value eq "at" ;
+ ok 127, $db->db_get(2, $value) == 0 && $value eq "ABuse" ;
+ ok 128, $db->db_get(3, $value) == 0 && $value eq "XYZa" ;
+ ok 129, $db->db_get(4, $value) == 0 && $value eq "KLM" ;
+
+ # now partial put
+ ($pon, $off, $len) = $db->partial_set(3,2) ;
+ ok 130, ! $pon ;
+ ok 131, $off == 0 ;
+ ok 132, $len == 0 ;
+ ok 133, $db->db_put(1, "PPP") == 0 ;
+ ok 134, $db->db_put(2, "Q") == 0 ;
+ ok 135, $db->db_put(3, "XYZ") == 0 ;
+ ok 136, $db->db_put(4, "TU") == 0 ;
+
+ $db->partial_clear() ;
+ ok 137, $db->db_get(1, $value) == 0 && $value eq "at\0PPP" ;
+ ok 138, $db->db_get(2, $value) == 0 && $value eq "ABuQ" ;
+ ok 139, $db->db_get(3, $value) == 0 && $value eq "XYZXYZ" ;
+ ok 140, $db->db_get(4, $value) == 0 && $value eq "KLMTU" ;
+}
+
+{
+ # partial
+ # check works via tied array
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+ ok 141, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create some data
+ my @data = (
+ "",
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $i ;
+ for ($i = 1 ; $i < @data ; ++$i) {
+ $array[$i] = $data[$i] ;
+ }
+
+
+ # do a partial get
+ $db->partial_set(0,2) ;
+ ok 142, $array[1] eq "bo" ;
+ ok 143, $array[2] eq "ho" ;
+ ok 144, $array[3] eq "se" ;
+
+ # do a partial get, off end of data
+ $db->partial_set(3,2) ;
+ ok 145, $array[1] eq "t" ;
+ ok 146, $array[2] eq "se" ;
+ ok 147, $array[3] eq "" ;
+
+    # switch off partial mode
+ $db->partial_clear() ;
+ ok 148, $array[1] eq "boat" ;
+ ok 149, $array[2] eq "house" ;
+ ok 150, $array[3] eq "sea" ;
+
+ # now partial put
+ $db->partial_set(0,2) ;
+ ok 151, $array[1] = "" ;
+ ok 152, $array[2] = "AB" ;
+ ok 153, $array[3] = "XYZ" ;
+ ok 154, $array[4] = "KLM" ;
+
+ $db->partial_clear() ;
+ ok 155, $array[1] eq "at" ;
+ ok 156, $array[2] eq "ABuse" ;
+ ok 157, $array[3] eq "XYZa" ;
+ ok 158, $array[4] eq "KLM" ;
+
+ # now partial put
+ $db->partial_set(3,2) ;
+ ok 159, $array[1] = "PPP" ;
+ ok 160, $array[2] = "Q" ;
+ ok 161, $array[3] = "XYZ" ;
+ ok 162, $array[4] = "TU" ;
+
+ $db->partial_clear() ;
+ ok 163, $array[1] eq "at\0PPP" ;
+ ok 164, $array[2] eq "ABuQ" ;
+ ok 165, $array[3] eq "XYZXYZ" ;
+ ok 166, $array[4] eq "KLMTU" ;
+}
+
+{
+ # transaction
+
+ my $lex = new LexFile $Dfile ;
+ my @array ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 167, my $lexD = new LexDir($home);
+ ok 168, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 169, my $txn = $env->txn_begin() ;
+ ok 170, my $db1 = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my @data = (
+ "boat",
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = 0 ; $i < @data ; ++$i) {
+ $ret += $db1->db_put($i, $data[$i]) ;
+ }
+ ok 171, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 172, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = (0, "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 173, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 174, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 175, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 176, $count == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie @array ;
+}
+
+
+{
+ # db_stat
+
+ my $lex = new LexFile $Dfile ;
+ my $recs = ($BerkeleyDB::db_version >= 3.1 ? "bt_ndata" : "bt_nrecs") ;
+ my @array ;
+ my ($k, $v) ;
+ ok 177, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE,
+ -Pagesize => 4 * 1024,
+ ;
+
+ my $ref = $db->db_stat() ;
+ ok 178, $ref->{$recs} == 0;
+ ok 179, $ref->{'bt_pagesize'} == 4 * 1024;
+
+ # create some data
+ my @data = (
+ 2,
+ "house",
+ "sea",
+ ) ;
+
+ my $ret = 0 ;
+ my $i ;
+ for ($i = $db->ArrayOffset ; @data ; ++$i) {
+ $ret += $db->db_put($i, shift @data) ;
+ }
+ ok 180, $ret == 0 ;
+
+ $ref = $db->db_stat() ;
+ ok 181, $ref->{$recs} == 3;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use BerkeleyDB;
+ @ISA=qw(BerkeleyDB::Recno);
+ @EXPORT = @BerkeleyDB::EXPORT ;
+
+ sub db_put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::db_put($key, $value * 3) ;
+ }
+
+ sub db_get {
+ my $self = shift ;
+ $self->SUPER::db_get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok 182, $@ eq "" ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB", -Filename => "dbbtree.tmp",
+ -Flags => DB_CREATE,
+ -Mode => 0640 );
+ ' ;
+
+ main::ok 183, $@ eq "" ;
+
+ my $ret = eval '$h[1] = 3 ; return $h[1] ' ;
+ main::ok 184, $@ eq "" ;
+ main::ok 185, $ret == 7 ;
+
+ my $value = 0;
+ $ret = eval '$X->db_put(1, 4) ; $X->db_get(1, $value) ; return $value' ;
+ main::ok 186, $@ eq "" ;
+ main::ok 187, $ret == 10 ;
+
+ $ret = eval ' DB_NEXT eq main::DB_NEXT ' ;
+ main::ok 188, $@ eq "" ;
+ main::ok 189, $ret == 1 ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok 190, $@ eq "" ;
+ main::ok 191, $ret eq "[[10]]" ;
+
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+    # variable length records, DB_DELIMITER -- defaults to \n
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 192, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 193, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+    # variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 194, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 195, $x eq "abc-def--ghi-";
+}
+
+{
+ # fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 196, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 197, $x eq "abc def ghi " ;
+}
+
+{
+ # fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 198, tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 199, $x eq "abc--def-------ghi--" ;
+}
+
+{
+ # DB_RENUMBER
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 200, my $db = tie @array, 'BerkeleyDB::Recno', -Filename => $Dfile,
+ -Property => DB_RENUMBER,
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ;
+ # create a few records
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ ok 201, my ($length, $joined) = joiner($db, "|") ;
+ ok 202, $length == 3 ;
+ ok 203, $joined eq "abc|def|ghi";
+
+ ok 204, $db->db_del(1) == 0 ;
+ ok 205, ($length, $joined) = joiner($db, "|") ;
+ ok 206, $length == 2 ;
+ ok 207, $joined eq "abc|ghi";
+
+ undef $db ;
+ untie @array ;
+
+}
+
+{
+ # DB_APPEND
+
+ my $lex = new LexFile $Dfile;
+ my @array ;
+ my $value ;
+ ok 208, my $db = tie @array, 'BerkeleyDB::Recno',
+ -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # create a few records
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+
+ my $k = 0 ;
+ ok 209, $db->db_put($k, "fred", DB_APPEND) == 0 ;
+ ok 210, $k == 4 ;
+
+ undef $db ;
+ untie @array ;
+}
+
+{
+    # in-memory Recno with an associated text file
+
+ my $lex = new LexFile $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 211, tie @array, 'BerkeleyDB::Recno', -Source => $Dfile2 ,
+ -ArrayBase => 0,
+ -Property => DB_RENUMBER,
+ -Flags => DB_CREATE ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 212, $x eq "abc\ndef\n\nghi\n" ;
+}
+
+{
+    # in-memory, variable length records, change DB_DELIMITER
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 213, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Source => $Dfile2 ,
+ -Property => DB_RENUMBER,
+ -Delim => "-";
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 214, $x eq "abc-def--ghi-";
+}
+
+{
+ # in-memory, fixed length records, default DB_PAD
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 215, tie @array, 'BerkeleyDB::Recno', -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 216, $x eq "abc def ghi " ;
+}
+
+{
+ # in-memory, fixed length records, change Pad
+
+ my $lex = new LexFile $Dfile, $Dfile2 ;
+ touch $Dfile2 ;
+ my @array ;
+ my $value ;
+ ok 217, tie @array, 'BerkeleyDB::Recno',
+ -ArrayBase => 0,
+ -Flags => DB_CREATE ,
+ -Property => DB_RENUMBER,
+ -Len => 5,
+ -Pad => "-",
+ -Source => $Dfile2 ;
+ $array[0] = "abc" ;
+ $array[1] = "def" ;
+ $array[3] = "ghi" ;
+ untie @array ;
+
+ my $x = docat($Dfile2) ;
+ ok 218, $x eq "abc--def-------ghi--" ;
+}
+
+__END__
+
+
+# TODO
+#
+# DB_DELIMITER DB_FIXEDLEN DB_PAD DB_SNAPSHOT with partial records
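
As a companion to the partial-record tests above, here is a minimal sketch of the partial_set/partial_clear calls they exercise; the filename is hypothetical and the sketch is not part of the test suite:

    use strict;
    use BerkeleyDB;

    my $db = new BerkeleyDB::Recno
                    -Filename => "recno.db",      # hypothetical file
                    -Flags    => DB_CREATE
        or die "cannot open recno.db: $!\n";

    $db->db_put(1, "abcdef");

    # read/write only 2 bytes starting at offset 1 of each record
    $db->partial_set(1, 2);
    my $value;
    $db->db_get(1, $value);      # $value is now "bc"

    # back to whole-record mode
    $db->partial_clear();
    $db->db_get(1, $value);      # $value is now "abcdef"
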
diff --git a/db/perl/BerkeleyDB/t/strict.t b/db/perl/BerkeleyDB/t/strict.t
new file mode 100644
index 000000000..a87d5312e
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/strict.t
@@ -0,0 +1,172 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..44\n";
+
+my $Dfile = "dbhash.tmp";
+my $home = "./fred" ;
+
+umask(0);
+
+{
+ # closing a database & an environment in the correct order.
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 3, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ ok 4, $db1->db_close() == 0 ;
+
+ eval { $status = $env->db_appexit() ; } ;
+ ok 5, $status == 0 ;
+ ok 6, $@ eq "" ;
+ #print "[$@]\n" ;
+
+}
+
+{
+ # closing an environment with an open database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 7, my $lexD = new LexDir($home);
+ ok 8, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 9, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env;
+
+ eval { $env->db_appexit() ; } ;
+ ok 10, $@ =~ /BerkeleyDB Aborting: attempted to close an environment with 1 open database/ ;
+ #print "[$@]\n" ;
+
+ undef $db1 ;
+ untie %hash ;
+ undef $env ;
+}
+
+{
+ # closing a transaction & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 11, my $lexD = new LexDir($home);
+ ok 12, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 13, my $txn = $env->txn_begin() ;
+ ok 14, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ ok 15, $txn->txn_commit() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 16, $status == 0 ;
+ ok 17, $@ eq "" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 18, $status == 0 ;
+ ok 19, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open transaction
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+
+ ok 20, my $lexD = new LexDir($home);
+ ok 21, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+
+ ok 22, my $txn = $env->txn_begin() ;
+ ok 23, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+ eval { $db->db_close() ; } ;
+ ok 24, $@ =~ /BerkeleyDB Aborting: attempted to close a database while a transaction was still open at/ ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a cursor & a database
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+ ok 25, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 26, my $cursor = $db->db_cursor() ;
+ ok 27, $cursor->c_close() == 0 ;
+ eval { $status = $db->db_close() ; } ;
+ ok 28, $status == 0 ;
+ ok 29, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a database with an open cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ ok 30, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+ ok 31, my $cursor = $db->db_cursor() ;
+ eval { $db->db_close() ; } ;
+ ok 32, $@ =~ /\QBerkeleyDB Aborting: attempted to close a database with 1 open cursor(s) at/;
+ #print "[$@]\n" ;
+}
+
+{
+ # closing a transaction & a cursor
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $status ;
+
+ ok 33, my $lexD = new LexDir($home);
+ ok 34, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 35, my $txn = $env->txn_begin() ;
+ ok 36, my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+ ok 37, my $cursor = $db->db_cursor() ;
+ eval { $status = $cursor->c_close() ; } ;
+ ok 38, $status == 0 ;
+ ok 39, ($status = $txn->txn_commit()) == 0 ;
+ ok 40, $@ eq "" ;
+ eval { $status = $db->db_close() ; } ;
+ ok 41, $status == 0 ;
+ ok 42, $@ eq "" ;
+ eval { $status = $env->db_appexit() ; } ;
+ ok 43, $status == 0 ;
+ ok 44, $@ eq "" ;
+ #print "[$@]\n" ;
+}
+
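
The tests above all enforce the same teardown order; a minimal sketch of that order (the directory and filenames are hypothetical) is:

    use strict;
    use BerkeleyDB;

    my $home = "./fred";
    -d $home or mkdir $home, 0777 or die "cannot create $home: $!\n";

    my $env = new BerkeleyDB::Env -Home  => $home,
                                  -Flags => DB_CREATE|DB_INIT_TXN|
                                            DB_INIT_MPOOL|DB_INIT_LOCK
        or die "cannot create environment: $!\n";

    my %hash;
    my $db = tie %hash, 'BerkeleyDB::Hash', -Filename => "dbhash.tmp",
                                            -Flags    => DB_CREATE,
                                            -Env      => $env
        or die "cannot open database: $!\n";

    # cursors and transactions first, then the database, then the environment
    undef $db;
    untie %hash;            # database is now closed
    $env->db_appexit();     # safe -- no open databases remain
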
diff --git a/db/perl/BerkeleyDB/t/subdb.t b/db/perl/BerkeleyDB/t/subdb.t
new file mode 100644
index 000000000..c48ec6e25
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/subdb.t
@@ -0,0 +1,242 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+BEGIN
+{
+ if ($BerkeleyDB::db_version < 3) {
+ print "1..0 # Skipping test, this needs Berkeley DB 3.x or better\n" ;
+ exit 0 ;
+ }
+}
+
+print "1..43\n";
+
+my $Dfile = "dbhash.tmp";
+my $Dfile2 = "dbhash2.tmp";
+my $Dfile3 = "dbhash3.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+# Berkeley DB 3.x specific functionality
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' BerkeleyDB::db_remove -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' BerkeleyDB::db_remove -Bad => 2, -Filename => "fred", -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' BerkeleyDB::db_remove -Filename => "a", -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' BerkeleyDB::db_remove -Subname => "a"' ;
+ ok 4, $@ =~ /^Must specify a filename/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' BerkeleyDB::db_remove -Filename => "x", -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+{
+ # subdatabases
+
+    # opening a subdatabase in an existing database that doesn't have
+    # any subdatabases at all should fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 6, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 7, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+ ok 8, ! $db ;
+
+ ok 9, -e $Dfile ;
+ ok 10, ! BerkeleyDB::db_remove(-Filename => $Dfile) ;
+}
+
+{
+ # subdatabases
+
+    # opening a subdatabase in an existing database that does have
+    # subdatabases, but not this particular one, should fail
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 11, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 12, addData($db, %data) ;
+
+ undef $db ;
+
+ $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "joe" ;
+
+ ok 13, !$db ;
+
+}
+
+{
+ # subdatabases
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 14, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 15, addData($db, %data) ;
+
+ undef $db ;
+
+ ok 16, $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ;
+
+ ok 17, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ if ($data{$k} eq $v) {
+ delete $data{$k} ;
+ }
+ }
+ ok 18, $status == DB_NOTFOUND ;
+ ok 19, keys %data == 0 ;
+}
+
+{
+ # subdatabases
+
+    # opening a database with multiple subdatabases - cursoring over the
+    # handle should return the subdatabase names
+
+ my $lex = new LexFile $Dfile ;
+
+ ok 20, my $db1 = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Subname => "fred" ,
+ -Flags => DB_CREATE ;
+
+ ok 21, my $db2 = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Subname => "joe" ,
+ -Flags => DB_CREATE ;
+
+ # Add a k/v pair
+ my %data = qw(
+ red sky
+ blue sea
+ black heart
+ yellow belley
+ green grass
+ ) ;
+
+ ok 22, addData($db1, %data) ;
+ ok 23, addData($db2, %data) ;
+
+ undef $db1 ;
+ undef $db2 ;
+
+ ok 24, my $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ #my $type = $db->type() ; print "type $type\n" ;
+ ok 25, my $cursor = $db->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $status ;
+ my @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 26, $status == DB_NOTFOUND ;
+ ok 27, join(",", sort @dbnames) eq "fred,joe" ;
+ undef $db ;
+
+ ok 28, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "harry") != 0;
+ ok 29, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") == 0 ;
+
+ # should only be one subdatabase
+ ok 30, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 31, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 32, $status == DB_NOTFOUND ;
+ ok 33, join(",", sort @dbnames) eq "joe" ;
+ undef $db ;
+
+ # can't delete an already deleted subdatabase
+ ok 34, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "fred") != 0;
+
+ ok 35, BerkeleyDB::db_remove(-Filename => $Dfile, -Subname => "joe") == 0 ;
+
+    # there should be no subdatabases left
+ ok 36, $db = new BerkeleyDB::Unknown -Filename => $Dfile ,
+ -Flags => DB_RDONLY ;
+
+ ok 37, $cursor = $db->db_cursor() ;
+ @dbnames = () ;
+ while (($status = $cursor->c_get($k, $v, DB_NEXT)) == 0) {
+ push @dbnames, $k ;
+ }
+ ok 38, $status == DB_NOTFOUND ;
+ ok 39, @dbnames == 0 ;
+ undef $db ;
+
+ ok 40, -e $Dfile ;
+ ok 41, BerkeleyDB::db_remove(-Filename => $Dfile) == 0 ;
+ ok 42, ! -e $Dfile ;
+ ok 43, BerkeleyDB::db_remove(-Filename => $Dfile) != 0 ;
+}
+
+# db_remove with env
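
A minimal sketch of the subdatabase handling checked above -- listing the subdatabase names via BerkeleyDB::Unknown and removing one with db_remove (the filename is hypothetical):

    use strict;
    use BerkeleyDB;

    my $file = "dbhash.tmp";
    my $db = new BerkeleyDB::Unknown -Filename => $file,
                                     -Flags    => DB_RDONLY
        or die "cannot open $file: $!\n";

    # with no -Subname, cursoring over the handle returns the subdatabase names
    my $cursor = $db->db_cursor();
    my ($name, $dummy) = ("", "");
    while ($cursor->c_get($name, $dummy, DB_NEXT) == 0) {
        print "subdatabase: $name\n";
    }
    undef $cursor;
    undef $db;

    # remove a single subdatabase; returns 0 on success
    BerkeleyDB::db_remove(-Filename => $file, -Subname => "fred");
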
diff --git a/db/perl/BerkeleyDB/t/txn.t b/db/perl/BerkeleyDB/t/txn.t
new file mode 100644
index 000000000..c746c8d9b
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/txn.t
@@ -0,0 +1,306 @@
+#!./perl -w
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..50\n";
+
+my $Dfile = "dbhash.tmp";
+
+umask(0);
+
+{
+ # error cases
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 1, my $lexD = new LexDir($home);
+ ok 2, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE| DB_INIT_MPOOL;
+ eval { $env->txn_begin() ; } ;
+ ok 3, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+
+ eval { my $txn_mgr = $env->TxnMgr() ; } ;
+ ok 4, $@ =~ /^BerkeleyDB Aborting: Transaction Manager not enabled at/ ;
+ undef $env ;
+
+}
+
+{
+ # transaction - abort works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 5, my $lexD = new LexDir($home);
+ ok 6, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 7, my $txn = $env->txn_begin() ;
+ ok 8, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 9, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 10, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 11, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 12, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 13, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 14, $count == 0 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 15, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - abort works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 16, my $lexD = new LexDir($home);
+ ok 17, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 18, my $txn_mgr = $env->TxnMgr() ;
+ ok 19, my $txn = $txn_mgr->txn_begin() ;
+ ok 20, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 21, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 22, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 23, $count == 3 ;
+ undef $cursor ;
+
+ # now abort the transaction
+ ok 24, $txn->txn_abort() == 0 ;
+
+ # there shouldn't be any records in the database
+ $count = 0 ;
+ # sequence forwards
+ ok 25, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 26, $count == 0 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 27, $stat->{'st_naborts'} == 1 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 28, my $lexD = new LexDir($home);
+ ok 29, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 30, my $txn = $env->txn_begin() ;
+ ok 31, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 32, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 33, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 34, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 35, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 36, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 37, $count == 3 ;
+
+ my $stat = $env->txn_stat() ;
+ ok 38, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $env ;
+ untie %hash ;
+}
+
+{
+ # transaction - commit works via txnmgr
+
+ my $lex = new LexFile $Dfile ;
+ my %hash ;
+ my $value ;
+
+ my $home = "./fred" ;
+ ok 39, my $lexD = new LexDir($home);
+ ok 40, my $env = new BerkeleyDB::Env -Home => $home,
+ -Flags => DB_CREATE|DB_INIT_TXN|
+ DB_INIT_MPOOL|DB_INIT_LOCK ;
+ ok 41, my $txn_mgr = $env->TxnMgr() ;
+ ok 42, my $txn = $txn_mgr->txn_begin() ;
+ ok 43, my $db1 = tie %hash, 'BerkeleyDB::Hash', -Filename => $Dfile,
+ -Flags => DB_CREATE ,
+ -Env => $env,
+ -Txn => $txn ;
+
+
+ # create some data
+ my %data = (
+ "red" => "boat",
+ "green" => "house",
+ "blue" => "sea",
+ ) ;
+
+ my $ret = 0 ;
+ while (my ($k, $v) = each %data) {
+ $ret += $db1->db_put($k, $v) ;
+ }
+ ok 44, $ret == 0 ;
+
+ # should be able to see all the records
+
+ ok 45, my $cursor = $db1->db_cursor() ;
+ my ($k, $v) = ("", "") ;
+ my $count = 0 ;
+ # sequence forwards
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 46, $count == 3 ;
+ undef $cursor ;
+
+ # now commit the transaction
+ ok 47, $txn->txn_commit() == 0 ;
+
+ $count = 0 ;
+ # sequence forwards
+ ok 48, $cursor = $db1->db_cursor() ;
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0) {
+ ++ $count ;
+ }
+ ok 49, $count == 3 ;
+
+ my $stat = $txn_mgr->txn_stat() ;
+ ok 50, $stat->{'st_naborts'} == 0 ;
+
+ undef $txn ;
+ undef $cursor ;
+ undef $db1 ;
+ undef $txn_mgr ;
+ undef $env ;
+ untie %hash ;
+}
+
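
A minimal sketch of the abort/commit behaviour these tests verify (the directory and filenames are hypothetical):

    use strict;
    use BerkeleyDB;

    my $home = "./fred";
    -d $home or mkdir $home, 0777 or die "cannot create $home: $!\n";

    my $env = new BerkeleyDB::Env -Home  => $home,
                                  -Flags => DB_CREATE|DB_INIT_TXN|
                                            DB_INIT_MPOOL|DB_INIT_LOCK
        or die "cannot create environment: $!\n";

    my $txn = $env->txn_begin();
    my $db  = new BerkeleyDB::Hash -Filename => "dbhash.tmp",
                                   -Flags    => DB_CREATE,
                                   -Env      => $env,
                                   -Txn      => $txn
        or die "cannot open database: $!\n";

    $db->db_put("red", "boat");

    # txn_commit makes the put permanent; txn_abort throws it away
    $txn->txn_abort();          # the record written above is now gone
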
diff --git a/db/perl/BerkeleyDB/t/unknown.t b/db/perl/BerkeleyDB/t/unknown.t
new file mode 100644
index 000000000..f2630b585
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/unknown.t
@@ -0,0 +1,176 @@
+#!./perl -w
+
+# ID: %I%, %G%
+
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use BerkeleyDB;
+use t::util ;
+
+print "1..41\n";
+
+my $Dfile = "dbhash.tmp";
+unlink $Dfile;
+
+umask(0) ;
+
+
+# Check for invalid parameters
+{
+ # Check for invalid parameters
+ my $db ;
+ eval ' $db = new BerkeleyDB::Unknown -Stupid => 3 ; ' ;
+ ok 1, $@ =~ /unknown key value\(s\) Stupid/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Bad => 2, -Mode => 0345, -Stupid => 3; ' ;
+ ok 2, $@ =~ /unknown key value\(s\) (Bad |Stupid ){2}/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Env => 2 ' ;
+ ok 3, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+
+ eval ' $db = new BerkeleyDB::Unknown -Txn => "fred" ' ;
+ ok 4, $@ =~ /^Txn not of type BerkeleyDB::Txn/ ;
+
+ my $obj = bless [], "main" ;
+ eval ' $db = new BerkeleyDB::Unknown -Env => $obj ' ;
+ ok 5, $@ =~ /^Env not of type BerkeleyDB::Env/ ;
+}
+
+# check the interface to a rubbish database
+{
+ # first an empty file
+ my $lex = new LexFile $Dfile ;
+ ok 6, writeFile($Dfile, "") ;
+
+ ok 7, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+
+ # now a non-database file
+ writeFile($Dfile, "\x2af6") ;
+ ok 8, ! (new BerkeleyDB::Unknown -Filename => $Dfile);
+}
+
+# check the interface to a Hash database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a hash database
+ ok 9, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 10, $db->db_put("some key", "some value") == 0 ;
+ ok 11, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+ # now open it with Unknown
+ ok 12, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 13, $db->type() == DB_HASH ;
+ ok 14, $db->db_get("some key", $value) == 0 ;
+ ok 15, $value eq "some value" ;
+ ok 16, $db->db_get("key", $value) == 0 ;
+ ok 17, $value eq "value" ;
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 18, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 19, $hash{"some key"} eq "some value" ;
+
+}
+
+# check the interface to a Btree database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+    # create a btree database
+ ok 20, my $db = new BerkeleyDB::Btree -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 21, $db->db_put("some key", "some value") == 0 ;
+ ok 22, $db->db_put("key", "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+    # now open it with Unknown
+    # (it was created above as a Btree)
+ ok 23, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 24, $db->type() == DB_BTREE ;
+ ok 25, $db->db_get("some key", $value) == 0 ;
+ ok 26, $value eq "some value" ;
+ ok 27, $db->db_get("key", $value) == 0 ;
+ ok 28, $value eq "value" ;
+
+
+ my @array ;
+ eval { $db->Tie(\@array)} ;
+ ok 29, $@ =~ /^Tie needs a reference to a hash/ ;
+
+ my %hash ;
+ $db->Tie(\%hash) ;
+ ok 30, $hash{"some key"} eq "some value" ;
+
+
+}
+
+# check the interface to a Recno database
+
+{
+ my $lex = new LexFile $Dfile ;
+
+ # create a recno database
+ ok 31, my $db = new BerkeleyDB::Recno -Filename => $Dfile,
+ -Flags => DB_CREATE ;
+
+ # Add a few k/v pairs
+ my $value ;
+ my $status ;
+ ok 32, $db->db_put(0, "some value") == 0 ;
+ ok 33, $db->db_put(1, "value") == 0 ;
+
+ # close the database
+ undef $db ;
+
+    # now open it with Unknown
+    # (it was created above as a Recno)
+ ok 34, $db = new BerkeleyDB::Unknown -Filename => $Dfile;
+
+ ok 35, $db->type() == DB_RECNO ;
+ ok 36, $db->db_get(0, $value) == 0 ;
+ ok 37, $value eq "some value" ;
+ ok 38, $db->db_get(1, $value) == 0 ;
+ ok 39, $value eq "value" ;
+
+
+ my %hash ;
+ eval { $db->Tie(\%hash)} ;
+ ok 40, $@ =~ /^Tie needs a reference to an array/ ;
+
+ my @array ;
+ $db->Tie(\@array) ;
+ ok 41, $array[1] eq "value" ;
+
+
+}
+
+# check i/f to text
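
A minimal sketch of the type-detection pattern this script checks (the filename is hypothetical):

    use strict;
    use BerkeleyDB;

    my $db = new BerkeleyDB::Unknown -Filename => "dbhash.tmp"
        or die "cannot open database: $!\n";

    my $type = $db->type();
    print "hash\n"  if $type == DB_HASH;
    print "btree\n" if $type == DB_BTREE;
    print "recno\n" if $type == DB_RECNO;

    # Tie to the matching Perl data type for the detected format
    if ($type == DB_RECNO) { my @array; $db->Tie(\@array) }
    else                   { my %hash;  $db->Tie(\%hash)  }
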
diff --git a/db/perl/BerkeleyDB/t/util.pm b/db/perl/BerkeleyDB/t/util.pm
new file mode 100644
index 000000000..0c7f0deb5
--- /dev/null
+++ b/db/perl/BerkeleyDB/t/util.pm
@@ -0,0 +1,180 @@
+package util ;
+
+package main ;
+
+use strict ;
+use BerkeleyDB ;
+use File::Path qw(rmtree);
+use vars qw(%DB_errors) ;
+
+%DB_errors = (
+ 'DB_INCOMPLETE' => "DB_INCOMPLETE: Sync was unable to complete",
+ 'DB_KEYEMPTY' => "DB_KEYEMPTY: Non-existent key/data pair",
+ 'DB_KEYEXIST' => "DB_KEYEXIST: Key/data pair already exists",
+ 'DB_LOCK_DEADLOCK' => "DB_LOCK_DEADLOCK: Locker killed to resolve a deadlock",
+ 'DB_LOCK_NOTGRANTED' => "DB_LOCK_NOTGRANTED: Lock not granted",
+ 'DB_NOTFOUND' => "DB_NOTFOUND: No matching key/data pair found",
+ 'DB_OLD_VERSION' => "DB_OLDVERSION: Database requires a version upgrade",
+ 'DB_RUNRECOVERY' => "DB_RUNRECOVERY: Fatal error, run database recovery",
+) ;
+
+{
+ package LexFile ;
+
+ sub new
+ {
+ my $self = shift ;
+ unlink @_ ;
+ bless [ @_ ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ unlink @{ $self } ;
+ }
+}
+
+
+{
+ package LexDir ;
+
+ use File::Path qw(rmtree);
+
+ sub new
+ {
+ my $self = shift ;
+ my $dir = shift ;
+
+ rmtree $dir if -e $dir ;
+
+ mkdir $dir, 0777 or return undef ;
+
+ return bless [ $dir ], $self ;
+ }
+
+ sub DESTROY
+ {
+ my $self = shift ;
+ rmtree $self->[0] ;
+ }
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT> || "" ;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub writeFile
+{
+ my $name = shift ;
+ open(FH, ">$name") or return 0 ;
+ print FH @_ ;
+ close FH ;
+ return 1 ;
+}
+
+sub touch
+{
+ my $file = shift ;
+ open(CAT,">$file") || die "Cannot open $file:$!";
+ close(CAT);
+}
+
+sub joiner
+{
+ my $db = shift ;
+ my $sep = shift ;
+ my ($k, $v) = (0, "") ;
+ my @data = () ;
+
+ my $cursor = $db->db_cursor() or return () ;
+ for ( my $status = $cursor->c_get($k, $v, DB_FIRST) ;
+ $status == 0 ;
+ $status = $cursor->c_get($k, $v, DB_NEXT)) {
+ push @data, $v ;
+ }
+
+ (scalar(@data), join($sep, @data)) ;
+}
+
+sub countRecords
+{
+ my $db = shift ;
+ my ($k, $v) = (0,0) ;
+ my ($count) = 0 ;
+ my ($cursor) = $db->db_cursor() ;
+ #for ($status = $cursor->c_get($k, $v, DB_FIRST) ;
+# $status == 0 ;
+# $status = $cursor->c_get($k, $v, DB_NEXT) )
+ while ($cursor->c_get($k, $v, DB_NEXT) == 0)
+ { ++ $count }
+
+ return $count ;
+}
+
+sub addData
+{
+ my $db = shift ;
+ my @data = @_ ;
+ die "addData odd data\n" if @data % 2 != 0 ;
+ my ($k, $v) ;
+ my $ret = 0 ;
+ while (@data) {
+ $k = shift @data ;
+ $v = shift @data ;
+ $ret += $db->db_put($k, $v) ;
+ }
+
+ return ($ret == 0) ;
+}
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+
+1;
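
A minimal sketch of how these helpers are typically combined in the test scripts above (the filename is hypothetical):

    use strict;
    use BerkeleyDB;
    use t::util;

    print "1..3\n";

    my $Dfile = "dbhash.tmp";
    my $lex = new LexFile $Dfile;        # removed now and again on scope exit

    ok 1, my $db = new BerkeleyDB::Hash -Filename => $Dfile,
                                        -Flags    => DB_CREATE;
    ok 2, addData($db, qw(red sky blue sea));    # two key/value pairs
    ok 3, countRecords($db) == 2;
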
diff --git a/db/perl/BerkeleyDB/typemap b/db/perl/BerkeleyDB/typemap
new file mode 100644
index 000000000..9feefdc47
--- /dev/null
+++ b/db/perl/BerkeleyDB/typemap
@@ -0,0 +1,275 @@
+# typemap for Perl 5 interface to Berkeley DB version 2 & 3
+#
+# SCCS: %I%, %G%
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+#
+#################################### DB SECTION
+#
+#
+
+void * T_PV
+u_int T_U_INT
+u_int32_t T_U_INT
+const char * T_PV_NULL
+PV_or_NULL T_PV_NULL
+IO_or_NULL T_IO_NULL
+
+AV * T_AV
+
+BerkeleyDB T_PTROBJ
+BerkeleyDB::Common T_PTROBJ_AV
+BerkeleyDB::Hash T_PTROBJ_AV
+BerkeleyDB::Btree T_PTROBJ_AV
+BerkeleyDB::Recno T_PTROBJ_AV
+BerkeleyDB::Queue T_PTROBJ_AV
+BerkeleyDB::Cursor T_PTROBJ_AV
+BerkeleyDB::TxnMgr T_PTROBJ_AV
+BerkeleyDB::Txn T_PTROBJ_AV
+BerkeleyDB::Log T_PTROBJ_AV
+BerkeleyDB::Lock T_PTROBJ_AV
+BerkeleyDB::Env T_PTROBJ_AV
+
+BerkeleyDB::Raw T_RAW
+BerkeleyDB::Common::Raw T_RAW
+BerkeleyDB::Hash::Raw T_RAW
+BerkeleyDB::Btree::Raw T_RAW
+BerkeleyDB::Recno::Raw T_RAW
+BerkeleyDB::Queue::Raw T_RAW
+BerkeleyDB::Cursor::Raw T_RAW
+BerkeleyDB::TxnMgr::Raw T_RAW
+BerkeleyDB::Txn::Raw T_RAW
+BerkeleyDB::Log::Raw T_RAW
+BerkeleyDB::Lock::Raw T_RAW
+BerkeleyDB::Env::Raw T_RAW
+
+BerkeleyDB::Env::Inner T_INNER
+BerkeleyDB::Common::Inner T_INNER
+BerkeleyDB::Txn::Inner T_INNER
+BerkeleyDB::TxnMgr::Inner T_INNER
+# BerkeleyDB__Env T_PTR
+DBT T_dbtdatum
+DBT_OPT T_dbtdatum_opt
+DBT_B T_dbtdatum_btree
+DBTKEY T_dbtkeydatum
+DBTKEY_B T_dbtkeydatum_btree
+DBTYPE T_U_INT
+DualType T_DUAL
+BerkeleyDB_type * T_IV
+BerkeleyDB_ENV_type * T_IV
+BerkeleyDB_TxnMgr_type * T_IV
+BerkeleyDB_Txn_type * T_IV
+BerkeleyDB__Cursor_type * T_IV
+DB * T_IV
+
+INPUT
+
+T_AV
+ if (SvROK($arg) && SvTYPE(SvRV($arg)) == SVt_PVAV)
+ /* if (sv_isa($arg, \"${ntype}\")) */
+ $var = (AV*)SvRV($arg);
+ else
+ croak(\"$var is not an array reference\")
+
+T_RAW
+        $var = INT2PTR($type,SvIV($arg))
+
+T_U_INT
+ $var = SvUV($arg)
+
+T_SV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV *)GetInternalObject($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_HV_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ HV * hv = (HV *)GetInternalObject($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_P_REF
+ if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+
+T_INNER
+ {
+ HV * hv = (HV *)SvRV($arg);
+ SV ** svp = hv_fetch(hv, \"db\", 2, FALSE);
+ IV tmp = SvIV(*svp);
+ $var = INT2PTR($type, tmp);
+ }
+
+T_PV_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else {
+ $var = ($type)SvPV($arg,PL_na) ;
+ if (PL_na == 0)
+ $var = NULL ;
+ }
+
+T_IO_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else
+ $var = IoOFP(sv_2io($arg))
+
+T_PTROBJ_NULL
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_SELF
+ if ($arg == &PL_sv_undef)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV((SV*)SvRV($arg));
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_PTROBJ_AV
+ if ($arg == &PL_sv_undef || $arg == NULL)
+ $var = NULL ;
+ else if (sv_derived_from($arg, \"${ntype}\")) {
+ IV tmp = SvIV(getInnerObject($arg)) ;
+ $var = INT2PTR($type, tmp);
+ }
+ else
+ croak(\"$var is not of type ${ntype}\")
+
+T_dbtkeydatum
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtkeydatum_btree
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->recno_or_queue ||
+ (db->type == DB_BTREE && flagSet(DB_SET_RECNO))) {
+ Value = GetRecnoKey(db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(db_recno_t);
+ }
+ else {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+T_dbtdatum
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+
+T_dbtdatum_opt
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+T_dbtdatum_btree
+ DBT_clear($var) ;
+ if (flagSet(DB_GET_BOTH)) {
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ $var.flags = db->partial ;
+ $var.dlen = db->dlen ;
+ $var.doff = db->doff ;
+ }
+
+
+OUTPUT
+
+T_RAW
+ sv_setiv($arg, PTR2IV($var));
+
+T_SV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF_NULL
+ sv_setiv($arg, PTR2IV($var));
+
+T_HV_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_P_REF
+ sv_setiv($arg, PTR2IV($var));
+
+T_DUAL
+ setDUALerrno($arg, $var) ;
+
+T_U_INT
+ sv_setuv($arg, (UV)$var);
+
+T_PV_NULL
+ sv_setpv((SV*)$arg, $var);
+
+T_dbtkeydatum_btree
+ OutputKey_B($arg, $var)
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_dbtdatum_opt
+ OutputValue($arg, $var)
+T_dbtdatum_btree
+ OutputValue_B($arg, $var)
+
+T_PTROBJ_NULL
+ sv_setref_pv($arg, \"${ntype}\", (void*)$var);
+
+T_PTROBJ_SELF
+ sv_setref_pv($arg, self, (void*)$var);
diff --git a/db/perl/DB_File/Changes b/db/perl/DB_File/Changes
new file mode 100644
index 000000000..da6af577c
--- /dev/null
+++ b/db/perl/DB_File/Changes
@@ -0,0 +1,368 @@
+
+0.1
+
+ First Release.
+
+0.2
+
+ When DB_File is opening a database file it no longer terminates the
+ process if dbopen returned an error. This allows file protection
+ errors to be caught at run time. Thanks to Judith Grass
+ <grass@cybercash.com> for spotting the bug.
+
+0.3
+
+ Added prototype support for multiple btree compare callbacks.
+
+1.0
+
+ DB_File has been in use for over a year. To reflect that, the
+ version number has been incremented to 1.0.
+
+ Added complete support for multiple concurrent callbacks.
+
+ Using the push method on an empty list didn't work properly. This
+ has been fixed.
+
+1.01
+
+ Fixed a core dump problem with SunOS.
+
+ The return value from TIEHASH wasn't set to NULL when dbopen
+ returned an error.
+
+1.02
+
+ Merged OS/2 specific code into DB_File.xs
+
+ Removed some redundant code in DB_File.xs.
+
+ Documentation update.
+
+ Allow negative subscripts with RECNO interface.
+
+ Changed the default flags from O_RDWR to O_CREAT|O_RDWR.
+
+ The example code which showed how to lock a database needed a call
+ to sync added. Without it the resultant database file was empty.
+
+ Added get_dup method.
+
+1.03
+
+ Documentation update.
+
+ DB_File now imports the constants (O_RDWR, O_CREAT etc.) from Fcntl
+ automatically.
+
+ The standard hash function exists is now supported.
+
+ Modified the behavior of get_dup. When it returns an associative
+ array, the value is the count of the number of matching BTREE
+ values.
+
+1.04
+
+ Minor documentation changes.
+
+ Fixed a bug in hash_cb. Patches supplied by Dave Hammen,
+ <hammen@gothamcity.jsc.nasa.govt>.
+
+ Fixed a bug with the constructors for DB_File::HASHINFO,
+ DB_File::BTREEINFO and DB_File::RECNOINFO. Also tidied up the
+ constructors to make them -w clean.
+
+ Reworked part of the test harness to be more locale friendly.
+
+1.05
+
+ Made all scripts in the documentation strict and -w clean.
+
+ Added logic to DB_File.xs to allow the module to be built after
+ Perl is installed.
+
+1.06
+
+ Minor namespace cleanup: Localized PrintBtree.
+
+1.07
+
+ Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+
+1.08
+
+ Documented operation of bval.
+
+1.09
+
+ Minor bug fix in DB_File::HASHINFO, DB_File::RECNOINFO and
+ DB_File::BTREEINFO.
+
+ Changed default mode to 0666.
+
+1.10
+
+ Fixed fd method so that it still returns -1 for in-memory files
+ when db 1.86 is used.
+
+1.11
+
+ Documented the untie gotcha.
+
+1.12
+
+ Documented the incompatibility with version 2 of Berkeley DB.
+
+1.13
+
+    Minor changes to DB_File.xs and DB_File.pm
+
+1.14
+
+ Made it illegal to tie an associative array to a RECNO database and
+ an ordinary array to a HASH or BTREE database.
+
+1.15
+
+ Patch from Gisle Aas <gisle@aas.no> to suppress "use of undefined
+ value" warning with db_get and db_seq.
+
+ Patch from Gisle Aas <gisle@aas.no> to make DB_File export only the
+ O_* constants from Fcntl.
+
+ Removed the DESTROY method from the DB_File::HASHINFO module.
+
+ Previously DB_File hard-wired the class name of any object that it
+ created to "DB_File". This makes sub-classing difficult. Now
+    DB_File creates objects in the namespace of the package it has been
+ inherited into.
+
+
+1.16
+
+ A harmless looking tab was causing Makefile.PL to fail on AIX 3.2.5
+
+ Small fix for the AIX strict C compiler XLC which doesn't like
+ __attribute__ being defined via proto.h and redefined via db.h. Fix
+ courtesy of Jarkko Hietaniemi.
+
+1.50
+
+ DB_File can now build with either DB 1.x or 2.x, but not both at
+ the same time.
+
+1.51
+
+ Fixed the test harness so that it doesn't expect DB_File to have
+ been installed by the main Perl build.
+
+
+ Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+
+1.52
+
+ Patch from Nick Ing-Simmons now allows DB_File to build on NT.
+ Merged 1.15 patch.
+
+1.53
+
+ Added DB_RENUMBER to flags for recno.
+
+1.54
+
+    Fixed a small bug in the test harness when run under win32.
+    The emulation of fd when using DB 2.x was busted.
+
+1.55
+ Merged 1.16 changes.
+
+1.56
+ Documented the Solaris 2.5 mutex bug
+
+1.57
+    If Perl has been compiled with Threads support, the symbol op will be
+ defined. This clashes with a field name in db.h, so it needs to be
+ #undef'ed before db.h is included.
+
+1.58
+ Tied Array support was enhanced in Perl 5.004_57. DB_File now
+ supports PUSH,POP,SHIFT,UNSHIFT & STORESIZE.
+
+ Fixed a problem with the use of sv_setpvn. When the size is
+ specified as 0, it does a strlen on the data. This was ok for DB
+ 1.x, but isn't for DB 2.x.
+
+1.59
+ Updated the license section.
+
+ Berkeley DB 2.4.10 disallows zero length keys. Tests 32 & 42 in
+ db-btree.t and test 27 in db-hash.t failed because of this change.
+ Those tests have been zapped.
+
+ Added dbinfo to the distribution.
+
+1.60
+ Changed the test to check for full tied array support
+
+1.61 19th November 1998
+
+ Added a note to README about how to build Berkeley DB 2.x when
+ using HP-UX.
+ Minor modifications to get the module to build with DB 2.5.x
+ Fixed a typo in the definition of O_RDONLY, courtesy of Mark Kettenis.
+
+1.62 30th November 1998
+
+ Added hints/dynixptx.pl.
+ Fixed typemap -- 1.61 used PL_na instead of na
+
+1.63 19th December 1998
+
+ * Fix to allow DB 2.6.x to build with DB_File
+ * Documentation updated to use push,pop etc in the RECNO example &
+ to include the find_dup & del_dup methods.
+
+1.64 21st February 1999
+
+ * Tidied the 1.x to 2.x flag mapping code.
+ * Added a patch from Mark Kettenis <kettenis@wins.uva.nl> to fix a flag
+ mapping problem with O_RDONLY on the Hurd
+ * Updated the message that db-recno.t prints when tests 51, 53 or 55 fail.
+
+1.65 6th March 1999
+
+ * Fixed a bug in the recno PUSH logic.
+ * The BOOT version check now needs 2.3.4 when using Berkeley DB version 2
+
+1.66 15th March 1999
+
+ * Added DBM Filter code
+
+1.67 6th June 1999
+
+ * Added DBM Filter documentation to DB_File.pm
+
+ * Fixed DBM Filter code to work with 5.004
+
+ * A few instances of newSVpvn were used in 1.66. This isn't available in
+ Perl 5.004_04 or earlier. Replaced with newSVpv.
+
+1.68 22nd July 1999
+
+ * Merged changes from 5.005_58
+
+  * Fixed a bug in R_IBEFORE & R_IAFTER processing in Berkeley DB
+ 2 databases.
+
+ * Added some of the examples in the POD into the test harness.
+
+1.69 3rd August 1999
+
+ * fixed a bug in push -- DB_APPEND wasn't working properly.
+
+ * Fixed the R_SETCURSOR bug introduced in 1.68
+
+ * Added a new Perl variable $DB_File::db_ver
+
+1.70 4th August 1999
+
+ * Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+
+ * Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+
+1.71 7th September 1999
+
+ * Fixed a bug that prevented 1.70 from compiling under win32
+
+ * Updated to support Berkeley DB 3.x
+
+ * Updated dbinfo for Berkeley DB 3.x file formats.
+
+1.72 16th January 2000
+
+ * Added hints/sco.pl
+
+ * The module will now use XSLoader when it is available. When it
+ isn't it will use DynaLoader.
+
+ * The locking section in DB_File.pm has been discredited. Many thanks
+ to David Harris for spotting the underlying problem, contributing
+ the updates to the documentation and writing DB_File::Lock (available
+ on CPAN).
+
+1.73 31st May 2000
+
+ * Added support in version.c for building with threaded Perl.
+
+ * Berkeley DB 3.1 has reenabled support for null keys. The test
+ harness has been updated to reflect this.
+
+1.74 10th December 2000
+
+  * A "close" call in DB_File.xs needed to be parenthesised to stop win32
+    from thinking it was one of its macros.
+
+ * Updated dbinfo to support Berkeley DB 3.1 file format changes.
+
+  * DB_File.pm & the test harness now use the warnings pragma (when
+ available).
+
+ * Included Perl core patch 7703 -- size argument for hash_cb is different
+ for Berkeley DB 3.x
+
+ * Included Perl core patch 7801 -- Give __getBerkeleyDBInfo the ANSI C
+ treatment.
+
+ * @a = () produced the warning 'Argument "" isn't numeric in entersub'
+ This has been fixed. Thanks to Edward Avis for spotting this bug.
+
+ * Added note about building under Linux. Included patches.
+
+ * Included Perl core patch 8068 -- fix for bug 20001013.009
+ When run with warnings enabled "$hash{XX} = undef " produced an
+ "Uninitialized value" warning. This has been fixed.
+
+1.75 17th December 2000
+
+ * Fixed perl core patch 7703
+
+  * Added support to allow DB_File to be built with Berkeley DB 3.2 --
+ btree_compare, btree_prefix and hash_cb needed to be changed.
+
+ * Updated dbinfo to support Berkeley DB 3.2 file format changes.
+
+
+1.76 15th January 2001
+
+ * Added instructions for using LD_PRELOAD to get Berkeley DB 2.x to work
+ with DB_File on Linux. Thanks to Norbert Bollow for sending details of
+ this approach.
+
+
+1.77 26th April 2001
+
+ * AIX is reported to need -lpthreads, so Makefile.PL now checks for AIX and
+ adds it to the link options.
+
+ * Minor documentation updates.
+
+ * Merged Core patch 9176
+
+ * Added a patch from Edward Avis that adds support for splice with
+ recno databases.
+
+ * Modified Makefile.PL to only enable the warnings pragma if using perl
+ 5.6.1 or better.
+
+1.78 30th July 2001
+
+ * the test in Makefile.PL for AIX used -plthreads. Should have been
+ -lpthreads
+
+ * merged Core patches
+ 10372, 10335, 10372, 10534, 10549, 10643, 11051, 11194, 11432
+
+ * added documentation patch regarding duplicate keys from Andrew Johnson
+
diff --git a/db/perl/DB_File/DB_File.pm b/db/perl/DB_File/DB_File.pm
new file mode 100644
index 000000000..289ac0a65
--- /dev/null
+++ b/db/perl/DB_File/DB_File.pm
@@ -0,0 +1,2274 @@
+# DB_File.pm -- Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess (Paul.Marquess@btinternet.com)
+# last modified 30th July 2001
+# version 1.78
+#
+# Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+
+package DB_File::HASHINFO ;
+
+require 5.003 ;
+
+use warnings;
+use strict;
+use Carp;
+require Tie::Hash;
+@DB_File::HASHINFO::ISA = qw(Tie::Hash);
+
+sub new
+{
+ my $pkg = shift ;
+ my %x ;
+ tie %x, $pkg ;
+ bless \%x, $pkg ;
+}
+
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bsize ffactor nelem cachesize hash lorder)
+ },
+ GOT => {}
+ }, $pkg ;
+}
+
+
+sub FETCH
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ return $self->{GOT}{$key} if exists $self->{VALID}{$key} ;
+
+ my $pkg = ref $self ;
+ croak "${pkg}::FETCH - Unknown element '$key'" ;
+}
+
+
+sub STORE
+{
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ $self->{GOT}{$key} = $value ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "${pkg}::STORE - Unknown element '$key'" ;
+}
+
+sub DELETE
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ if ( exists $self->{VALID}{$key} )
+ {
+ delete $self->{GOT}{$key} ;
+ return ;
+ }
+
+ my $pkg = ref $self ;
+ croak "DB_File::HASHINFO::DELETE - Unknown element '$key'" ;
+}
+
+sub EXISTS
+{
+ my $self = shift ;
+ my $key = shift ;
+
+ exists $self->{VALID}{$key} ;
+}
+
+sub NotHere
+{
+ my $self = shift ;
+ my $method = shift ;
+
+ croak ref($self) . " does not define the method ${method}" ;
+}
+
+sub FIRSTKEY { my $self = shift ; $self->NotHere("FIRSTKEY") }
+sub NEXTKEY { my $self = shift ; $self->NotHere("NEXTKEY") }
+sub CLEAR { my $self = shift ; $self->NotHere("CLEAR") }
+
+package DB_File::RECNOINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::RECNOINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( bval cachesize psize flags lorder reclen bfname )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+package DB_File::BTREEINFO ;
+
+use warnings;
+use strict ;
+
+@DB_File::BTREEINFO::ISA = qw(DB_File::HASHINFO) ;
+
+sub TIEHASH
+{
+ my $pkg = shift ;
+
+ bless { VALID => { map {$_, 1}
+ qw( flags cachesize maxkeypage minkeypage psize
+ compare prefix lorder )
+ },
+ GOT => {},
+ }, $pkg ;
+}
+
+
+package DB_File ;
+
+use warnings;
+use strict;
+use vars qw($VERSION @ISA @EXPORT $AUTOLOAD $DB_BTREE $DB_HASH $DB_RECNO
+ $db_version $use_XSLoader
+ ) ;
+use Carp;
+
+
+$VERSION = "1.78" ;
+
+#typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE;
+$DB_BTREE = new DB_File::BTREEINFO ;
+$DB_HASH = new DB_File::HASHINFO ;
+$DB_RECNO = new DB_File::RECNOINFO ;
+
+require Tie::Hash;
+require Exporter;
+use AutoLoader;
+BEGIN {
+ $use_XSLoader = 1 ;
+ eval { require XSLoader } ;
+
+ if ($@) {
+ $use_XSLoader = 0 ;
+ require DynaLoader;
+ @ISA = qw(DynaLoader);
+ }
+}
+
+push @ISA, qw(Tie::Hash Exporter);
+@EXPORT = qw(
+ $DB_BTREE $DB_HASH $DB_RECNO
+
+ BTREEMAGIC
+ BTREEVERSION
+ DB_LOCK
+ DB_SHMEM
+ DB_TXN
+ HASHMAGIC
+ HASHVERSION
+ MAX_PAGE_NUMBER
+ MAX_PAGE_OFFSET
+ MAX_REC_NUMBER
+ RET_ERROR
+ RET_SPECIAL
+ RET_SUCCESS
+ R_CURSOR
+ R_DUP
+ R_FIRST
+ R_FIXEDLEN
+ R_IAFTER
+ R_IBEFORE
+ R_LAST
+ R_NEXT
+ R_NOKEY
+ R_NOOVERWRITE
+ R_PREV
+ R_RECNOSYNC
+ R_SETCURSOR
+ R_SNAPSHOT
+ __R_UNUSED
+
+);
+
+sub AUTOLOAD {
+ my($constname);
+ ($constname = $AUTOLOAD) =~ s/.*:://;
+ my $val = constant($constname, @_ ? $_[0] : 0);
+ if ($! != 0) {
+ if ($! =~ /Invalid/ || $!{EINVAL}) {
+ $AutoLoader::AUTOLOAD = $AUTOLOAD;
+ goto &AutoLoader::AUTOLOAD;
+ }
+ else {
+ my($pack,$file,$line) = caller;
+ croak "Your vendor has not defined DB macro $constname, used at $file line $line.
+";
+ }
+ }
+ eval "sub $AUTOLOAD { $val }";
+ goto &$AUTOLOAD;
+}
+
+
+eval {
+ # Make all Fcntl O_XXX constants available for importing
+ require Fcntl;
+ my @O = grep /^O_/, @Fcntl::EXPORT;
+ Fcntl->import(@O); # first we import what we want to export
+ push(@EXPORT, @O);
+};
+
+if ($use_XSLoader)
+ { XSLoader::load("DB_File", $VERSION)}
+else
+ { bootstrap DB_File $VERSION }
+
+# Preloaded methods go here. Autoload methods go after __END__, and are
+# processed by the autosplit program.
+
+sub tie_hash_or_array
+{
+ my (@arg) = @_ ;
+ my $tieHASH = ( (caller(1))[3] =~ /TIEHASH/ ) ;
+
+ $arg[4] = tied %{ $arg[4] }
+ if @arg >= 5 && ref $arg[4] && $arg[4] =~ /=HASH/ && tied %{ $arg[4] } ;
+
+ # make recno in Berkeley DB version 2 work like recno in version 1.
+ if ($db_version > 1 and defined $arg[4] and $arg[4] =~ /RECNO/ and
+ $arg[1] and ! -e $arg[1]) {
+ open(FH, ">$arg[1]") or return undef ;
+ close FH ;
+ chmod $arg[3] ? $arg[3] : 0666 , $arg[1] ;
+ }
+
+ DoTie_($tieHASH, @arg) ;
+}
+
+sub TIEHASH
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub TIEARRAY
+{
+ tie_hash_or_array(@_) ;
+}
+
+sub CLEAR
+{
+ my $self = shift;
+ my $key = 0 ;
+ my $value = "" ;
+ my $status = $self->seq($key, $value, R_FIRST());
+ my @keys;
+
+ while ($status == 0) {
+ push @keys, $key;
+ $status = $self->seq($key, $value, R_NEXT());
+ }
+ foreach $key (reverse @keys) {
+ my $s = $self->del($key);
+ }
+}
+
+sub EXTEND { }
+
+sub STORESIZE
+{
+ my $self = shift;
+ my $length = shift ;
+ my $current_length = $self->length() ;
+
+ if ($length < $current_length) {
+ my $key ;
+ for ($key = $current_length - 1 ; $key >= $length ; -- $key)
+ { $self->del($key) }
+ }
+ elsif ($length > $current_length) {
+ $self->put($length-1, "") ;
+ }
+}
+
+
+sub SPLICE
+{
+ my $self = shift;
+ my $offset = shift;
+ if (not defined $offset) {
+ carp 'Use of uninitialized value in splice';
+ $offset = 0;
+ }
+
+ my $length = @_ ? shift : 0;
+ # Carping about definedness comes _after_ the OFFSET sanity check.
+ # This is so we get the same error messages as Perl's splice().
+ #
+
+ my @list = @_;
+
+ my $size = $self->FETCHSIZE();
+
+ # 'If OFFSET is negative then it start that far from the end of
+ # the array.'
+ #
+ if ($offset < 0) {
+ my $new_offset = $size + $offset;
+ if ($new_offset < 0) {
+ die "Modification of non-creatable array value attempted, "
+ . "subscript $offset";
+ }
+ $offset = $new_offset;
+ }
+
+ if ($offset > $size) {
+ $offset = $size;
+ }
+
+ if (not defined $length) {
+ carp 'Use of uninitialized value in splice';
+ $length = 0;
+ }
+
+ # 'If LENGTH is omitted, removes everything from OFFSET onward.'
+ if (not defined $length) {
+ $length = $size - $offset;
+ }
+
+ # 'If LENGTH is negative, leave that many elements off the end of
+ # the array.'
+ #
+ if ($length < 0) {
+ $length = $size - $offset + $length;
+
+ if ($length < 0) {
+ # The user must have specified a length bigger than the
+ # length of the array passed in. But perl's splice()
+ # doesn't catch this, it just behaves as for length=0.
+ #
+ $length = 0;
+ }
+ }
+
+ if ($length > $size - $offset) {
+ $length = $size - $offset;
+ }
+
+ # $num_elems holds the current number of elements in the database.
+ my $num_elems = $size;
+
+ # 'Removes the elements designated by OFFSET and LENGTH from an
+ # array,'...
+ #
+ my @removed = ();
+ foreach (0 .. $length - 1) {
+ my $old;
+ my $status = $self->get($offset, $old);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on get($offset, \$old)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+ push @removed, $old;
+
+ $status = $self->del($offset);
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on del($offset)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ": error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ -- $num_elems;
+ }
+
+ # ...'and replaces them with the elements of LIST, if any.'
+ my $pos = $offset;
+ while (defined (my $elem = shift @list)) {
+ my $old_pos = $pos;
+ my $status;
+ if ($pos >= $num_elems) {
+ $status = $self->put($pos, $elem);
+ }
+ else {
+ $status = $self->put($pos, $elem, $self->R_IBEFORE);
+ }
+
+ if ($status != 0) {
+ my $msg = "error from Berkeley DB on put($pos, $elem, ...)";
+ if ($status == 1) {
+ $msg .= ' (no such element?)';
+ }
+ else {
+ $msg .= ", error status $status";
+ if (defined $! and $! ne '') {
+ $msg .= ", message $!";
+ }
+ }
+ die $msg;
+ }
+
+ die "pos unexpectedly changed from $old_pos to $pos with R_IBEFORE"
+ if $old_pos != $pos;
+
+ ++ $pos;
+ ++ $num_elems;
+ }
+
+ if (wantarray) {
+ # 'In list context, returns the elements removed from the
+ # array.'
+ #
+ return @removed;
+ }
+ elsif (defined wantarray and not wantarray) {
+ # 'In scalar context, returns the last element removed, or
+ # undef if no elements are removed.'
+ #
+ if (@removed) {
+ my $last = pop @removed;
+ return "$last";
+ }
+ else {
+ return undef;
+ }
+ }
+ elsif (not defined wantarray) {
+ # Void context
+ }
+ else { die }
+}
+sub ::DB_File::splice { &SPLICE }
+
+sub find_dup
+{
+ croak "Usage: \$db->find_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($origkey, $value_wanted) = @_ ;
+ my ($key, $value) = ($origkey, 0);
+ my ($status) = 0 ;
+
+ for ($status = $db->seq($key, $value, R_CURSOR() ) ;
+ $status == 0 ;
+ $status = $db->seq($key, $value, R_NEXT() ) ) {
+
+ return 0 if $key eq $origkey and $value eq $value_wanted ;
+ }
+
+ return $status ;
+}
+
+sub del_dup
+{
+ croak "Usage: \$db->del_dup(key,value)\n"
+ unless @_ == 3 ;
+
+ my $db = shift ;
+ my ($key, $value) = @_ ;
+ my ($status) = $db->find_dup($key, $value) ;
+ return $status if $status != 0 ;
+
+ $status = $db->del($key, R_CURSOR() ) ;
+ return $status ;
+}
+
+sub get_dup
+{
+ croak "Usage: \$db->get_dup(key [,flag])\n"
+ unless @_ == 2 or @_ == 3 ;
+
+ my $db = shift ;
+ my $key = shift ;
+ my $flag = shift ;
+ my $value = 0 ;
+ my $origkey = $key ;
+ my $wantarray = wantarray ;
+ my %values = () ;
+ my @values = () ;
+ my $counter = 0 ;
+ my $status = 0 ;
+
+    # iterate through the database until either EOF ($status != 0)
+ # or a different key is encountered ($key ne $origkey).
+ for ($status = $db->seq($key, $value, R_CURSOR()) ;
+ $status == 0 and $key eq $origkey ;
+ $status = $db->seq($key, $value, R_NEXT()) ) {
+
+ # save the value or count number of matches
+ if ($wantarray) {
+ if ($flag)
+ { ++ $values{$value} }
+ else
+ { push (@values, $value) }
+ }
+ else
+ { ++ $counter }
+
+ }
+
+ return ($wantarray ? ($flag ? %values : @values) : $counter) ;
+}
+
+
+1;
+__END__
+
+=head1 NAME
+
+DB_File - Perl5 access to Berkeley DB version 1.x
+
+=head1 SYNOPSIS
+
+ use DB_File;
+
+ [$X =] tie %hash, 'DB_File', [$filename, $flags, $mode, $DB_HASH] ;
+ [$X =] tie %hash, 'DB_File', $filename, $flags, $mode, $DB_BTREE ;
+ [$X =] tie @array, 'DB_File', $filename, $flags, $mode, $DB_RECNO ;
+
+ $status = $X->del($key [, $flags]) ;
+ $status = $X->put($key, $value [, $flags]) ;
+ $status = $X->get($key, $value [, $flags]) ;
+ $status = $X->seq($key, $value, $flags) ;
+ $status = $X->sync([$flags]) ;
+ $status = $X->fd ;
+
+ # BTREE only
+ $count = $X->get_dup($key) ;
+ @list = $X->get_dup($key) ;
+ %list = $X->get_dup($key, 1) ;
+ $status = $X->find_dup($key, $value) ;
+ $status = $X->del_dup($key, $value) ;
+
+ # RECNO only
+ $a = $X->length;
+ $a = $X->pop ;
+ $X->push(list);
+ $a = $X->shift;
+ $X->unshift(list);
+ @r = $X->splice(offset, length, elements);
+
+ # DBM Filters
+ $old_filter = $db->filter_store_key ( sub { ... } ) ;
+ $old_filter = $db->filter_store_value( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_key ( sub { ... } ) ;
+ $old_filter = $db->filter_fetch_value( sub { ... } ) ;
+
+ untie %hash ;
+ untie @array ;
+
+=head1 DESCRIPTION
+
+B<DB_File> is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1.x (if you have a newer
+version of DB, see L<Using DB_File with Berkeley DB version 2 or 3>).
+It is assumed that you have a copy of the Berkeley DB manual pages at
+hand when reading this documentation. The interface defined here
+mirrors the Berkeley DB interface closely.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. B<DB_File> provides an interface to all
+three of the database types currently supported by Berkeley DB.
+
+The file types are:
+
+=over 5
+
+=item B<DB_HASH>
+
+This database type allows arbitrary key/value pairs to be stored in data
+files. This is equivalent to the functionality provided by other
+hashing packages like DBM, NDBM, ODBM, GDBM, and SDBM. Remember though,
+the files created using DB_HASH are not compatible with any of the
+other packages mentioned.
+
+A default hashing algorithm, which will be adequate for most
+applications, is built into Berkeley DB. If you do need to use your own
+hashing algorithm it is possible to write your own in Perl and have
+B<DB_File> use it instead.
+
+=item B<DB_BTREE>
+
+The btree format allows arbitrary key/value pairs to be stored in a
+sorted, balanced tree structure.
+
+As with the DB_HASH format, it is possible to provide a user defined
+Perl routine to perform the comparison of keys. By default, though, the
+keys are stored in lexical order.
+
+=item B<DB_RECNO>
+
+DB_RECNO allows both fixed-length and variable-length flat text files
+to be manipulated using the same key/value pair interface as in DB_HASH
+and DB_BTREE. In this case the key will consist of a record (line)
+number.
+
+=back
+
+=head2 Using DB_File with Berkeley DB version 2 or 3
+
+Although B<DB_File> is intended to be used with Berkeley DB version 1,
+it can also be used with version 2 or 3. In this case the interface is
+limited to the functionality provided by Berkeley DB 1.x. Anywhere the
+version 2 or 3 interface differs, B<DB_File> arranges for it to work
+like version 1. This feature allows B<DB_File> scripts that were built
+with version 1 to be migrated to version 2 or 3 without any changes.
+
+If you want to make use of the new features available in Berkeley DB
+2.x or greater, use the Perl module B<BerkeleyDB> instead.
+
+B<Note:> The database file format has changed in both Berkeley DB
+version 2 and 3. If you cannot recreate your databases, you must dump
+any existing databases with either the C<db_dump> or the C<db_dump185>
+utility that comes with Berkeley DB.
+Once you have rebuilt DB_File to use Berkeley DB version 2 or 3, your
+databases can be recreated using C<db_load>. Refer to the Berkeley DB
+documentation for further details.
+
+Please read L<"COPYRIGHT"> before using version 2.x or 3.x of Berkeley
+DB with DB_File.
+
+=head2 Interface to Berkeley DB
+
+B<DB_File> allows access to Berkeley DB files using the tie() mechanism
+in Perl 5 (for full details, see L<perlfunc/tie()>). This facility
+allows B<DB_File> to access Berkeley DB files using either an
+associative array (for DB_HASH & DB_BTREE file types) or an ordinary
+array (for the DB_RECNO file type).
+
+In addition to the tie() interface, it is also possible to access most
+of the functions provided in the Berkeley DB API directly.
+See L<THE API INTERFACE>.
+
+=head2 Opening a Berkeley DB Database File
+
+Berkeley DB uses the function dbopen() to open or create a database.
+Here is the C prototype for dbopen():
+
+ DB*
+ dbopen (const char * file, int flags, int mode,
+ DBTYPE type, const void * openinfo)
+
+The parameter C<type> is an enumeration which specifies which of the 3
+interface methods (DB_HASH, DB_BTREE or DB_RECNO) is to be used.
+Depending on which of these is actually chosen, the final parameter,
+I<openinfo>, points to a data structure which allows tailoring of the
+specific interface method.
+
+This interface is handled slightly differently in B<DB_File>. Here is
+an equivalent call using B<DB_File>:
+
+ tie %array, 'DB_File', $filename, $flags, $mode, $DB_HASH ;
+
+The C<filename>, C<flags> and C<mode> parameters are the direct
+equivalent of their dbopen() counterparts. The final parameter $DB_HASH
+performs the function of both the C<type> and C<openinfo> parameters in
+dbopen().
+
+In the example above $DB_HASH is actually a pre-defined reference to a
+hash object. B<DB_File> has three of these pre-defined references.
+Apart from $DB_HASH, there is also $DB_BTREE and $DB_RECNO.
+
+The keys allowed in each of these pre-defined references are limited to
+the names used in the equivalent C structure. So, for example, the
+$DB_HASH reference will only allow keys called C<bsize>, C<cachesize>,
+C<ffactor>, C<hash>, C<lorder> and C<nelem>.
+
+To change one of these elements, just assign to it like this:
+
+ $DB_HASH->{'cachesize'} = 10000 ;
+
+The three predefined variables $DB_HASH, $DB_BTREE and $DB_RECNO are
+usually adequate for most applications. If you do need to create extra
+instances of these objects, constructors are available for each file
+type.
+
+Here are examples of the constructors and the valid options available
+for DB_HASH, DB_BTREE and DB_RECNO respectively.
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'bsize'} ;
+ $a->{'cachesize'} ;
+ $a->{'ffactor'};
+ $a->{'hash'} ;
+ $a->{'lorder'} ;
+ $a->{'nelem'} ;
+
+ $b = new DB_File::BTREEINFO ;
+ $b->{'flags'} ;
+ $b->{'cachesize'} ;
+ $b->{'maxkeypage'} ;
+ $b->{'minkeypage'} ;
+ $b->{'psize'} ;
+ $b->{'compare'} ;
+ $b->{'prefix'} ;
+ $b->{'lorder'} ;
+
+ $c = new DB_File::RECNOINFO ;
+ $c->{'bval'} ;
+ $c->{'cachesize'} ;
+ $c->{'psize'} ;
+ $c->{'flags'} ;
+ $c->{'lorder'} ;
+ $c->{'reclen'} ;
+ $c->{'bfname'} ;
+
+The values stored in the hashes above are mostly the direct equivalents
+of their C counterparts. Like their C counterparts, all are set to
+default values - that means you don't have to set I<all> of the
+values when you only want to change one. Here is an example:
+
+ $a = new DB_File::HASHINFO ;
+ $a->{'cachesize'} = 12345 ;
+ tie %y, 'DB_File', "filename", $flags, 0777, $a ;
+
+A few of the options need extra discussion here. When used, the C
+equivalent of the keys C<hash>, C<compare> and C<prefix> store pointers
+to C functions. In B<DB_File> these keys are used to store references
+to Perl subs. Below are templates for each of the subs:
+
+ sub hash
+ {
+ my ($data) = @_ ;
+ ...
+ # return the hash value for $data
+ return $hash ;
+ }
+
+ sub compare
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return 0 if $key1 eq $key2
+ # -1 if $key1 lt $key2
+ # 1 if $key1 gt $key2
+ return (-1 , 0 or 1) ;
+ }
+
+ sub prefix
+ {
+	my ($key1, $key2) = @_ ;
+ ...
+ # return number of bytes of $key2 which are
+ # necessary to determine that it is greater than $key1
+ return $bytes ;
+ }
+
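+To have B<DB_File> use one of these subs, store a reference to it in
+the corresponding element of the pre-defined hash. For example (a
+sketch only - the sub bodies above still need to be filled in):
+
+ $DB_HASH->{'hash'}     = \&hash ;
+ $DB_BTREE->{'compare'} = \&compare ;
+ $DB_BTREE->{'prefix'}  = \&prefix ;
+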
+See L<Changing the BTREE sort order> for an example of using the
+C<compare> template.
+
+If you are using the DB_RECNO interface and you intend making use of
+C<bval>, you should check out L<The 'bval' Option>.
+
+=head2 Default Parameters
+
+It is possible to omit some or all of the final 4 parameters in the
+call to C<tie> and let them take default values. As DB_HASH is the most
+common file format used, the call:
+
+ tie %A, "DB_File", "filename" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", "filename", O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+It is also possible to omit the filename parameter, so the call:
+
+ tie %A, "DB_File" ;
+
+is equivalent to:
+
+ tie %A, "DB_File", undef, O_CREAT|O_RDWR, 0666, $DB_HASH ;
+
+See L<In Memory Databases> for a discussion on the use of C<undef>
+in place of a filename.
+
+=head2 In Memory Databases
+
+Berkeley DB allows the creation of in-memory databases by using NULL
+(that is, a C<(char *)0> in C) in place of the filename. B<DB_File>
+uses C<undef> instead of NULL to provide this functionality.
+
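+For example, here is a minimal sketch that creates and uses an
+in-memory DB_HASH database:
+
+ use DB_File ;
+
+ my %h ;
+ tie %h, 'DB_File', undef, O_CREAT|O_RDWR, 0666, $DB_HASH
+     or die "Cannot create in-memory database: $!\n";
+
+ $h{'key'} = 'value' ;
+ print "stored: $h{'key'}\n" ;
+ untie %h ;
+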
+=head1 DB_HASH
+
+The DB_HASH file format is probably the most commonly used of the three
+file formats that B<DB_File> supports. It is also very straightforward
+to use.
+
+=head2 A Simple Example
+
+This example shows how to create a database, add key/value pairs to the
+database, delete keys/value pairs and finally how to enumerate the
+contents of the database.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use vars qw( %h $k $v ) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0666, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Banana Exists
+
+ orange -> orange
+ tomato -> red
+ banana -> yellow
+
+Note that, like ordinary associative arrays, the keys are retrieved in
+an apparently random order.
+
+=head1 DB_BTREE
+
+The DB_BTREE format is useful when you want to store data in a given
+order. By default the keys will be stored in lexical order, but as you
+will see from the example shown in the next section, it is very easy to
+define your own sorting function.
+
+=head2 Changing the BTREE sort order
+
+This script shows how to override the default sorting algorithm that
+BTREE uses. Instead of using the normal lexical ordering, a case
+insensitive compare function will be used.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+Here is the output from the code above.
+
+ mouse
+ Smith
+ Wall
+
+There are a few points to bear in mind if you want to change the
+ordering in a BTREE database:
+
+=over 5
+
+=item 1.
+
+The new compare function must be specified when you create the database.
+
+=item 2.
+
+You cannot change the ordering once the database has been created. Thus
+you must use the same compare function every time you access the
+database.
+
+=item 3.
+
+Duplicate keys are entirely defined by the comparison function.
+In the case-insensitive example above, the keys: 'KEY' and 'key'
+would be considered duplicates, and assigning to the second one
+would overwrite the first. If duplicates are allowed for (with the
+R_DUP flag discussed below), only a single copy of duplicate keys
+is stored in the database --- so (again with the example above) assigning
+three values to the keys: 'KEY', 'Key', and 'key' would leave just
+the first key: 'KEY' in the database with three values. For some
+situations this results in information loss, so care should be taken
+to provide fully qualified comparison functions when necessary.
+For example, the above comparison routine could be modified to
+additionally compare case-sensitively if two keys are equal in the
+case insensitive comparison:
+
+ sub compare {
+ my($key1, $key2) = @_;
+ lc $key1 cmp lc $key2 ||
+ $key1 cmp $key2;
+ }
+
+And now you will only have duplicates when the keys themselves
+are truly the same. (note: in versions of the db library prior to
+about November 1996, such duplicate keys were retained so it was
+possible to recover the original keys in sets of keys that
+compared as equal).
+
+
+=back
+
+=head2 Handling Duplicate Keys
+
+The BTREE file type optionally allows a single key to be associated
+with an arbitrary number of values. This option is enabled by setting
+the flags element of C<$DB_BTREE> to R_DUP when creating the database.
+
+There are some difficulties in using the tied hash interface if you
+want to manipulate a BTREE database with duplicate keys. Consider this
+code:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename %h ) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (sort keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+Here is the output:
+
+ Smith -> John
+ Wall -> Larry
+ Wall -> Larry
+ Wall -> Larry
+ mouse -> mickey
+
+As you can see 3 records have been successfully created with key C<Wall>
+- the only thing is, when they are retrieved from the database they
+I<seem> to have the same value, namely C<Larry>. The problem is caused
+by the way that the associative array interface works. Basically, when
+the associative array interface is used to fetch the value associated
+with a given key, it will only ever retrieve the first value.
+
+Although it may not be immediately obvious from the code above, the
+associative array interface can be used to write values with duplicate
+keys, but it cannot be used to read them back from the database.
+
+The way to get around this problem is to use the Berkeley DB API method
+called C<seq>. This method allows sequential access to key/value
+pairs. See L<THE API INTERFACE> for details of both the C<seq> method
+and the API in general.
+
+Here is the script above rewritten using the C<seq> API method.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $status $key $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+ undef $x ;
+ untie %h ;
+
+This prints:
+
+ Smith -> John
+ Wall -> Brick
+ Wall -> Brick
+ Wall -> Larry
+ mouse -> mickey
+
+This time we have got all the key/value pairs, including the multiple
+values associated with the key C<Wall>.
+
+To make life easier when dealing with duplicate keys, B<DB_File> comes with
+a few utility methods.
+
+=head2 The get_dup() Method
+
+The C<get_dup> method assists in
+reading duplicate values from BTREE databases. The method can take the
+following forms:
+
+ $count = $x->get_dup($key) ;
+ @list = $x->get_dup($key) ;
+ %list = $x->get_dup($key, 1) ;
+
+In a scalar context the method returns the number of values associated
+with the key, C<$key>.
+
+In list context, it returns all the values which match C<$key>. Note
+that the values will be returned in an apparently random order.
+
+In list context, if the second parameter is present and evaluates to
+TRUE, the method returns an associative array. The keys of the
+associative array correspond to the values that matched in the BTREE
+and the values of the array are a count of the number of times that
+particular value occurred in the BTREE.
+
+So assuming the database created above, we can use C<get_dup> like
+this:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h ) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+
+and it will print:
+
+ Wall occurred 3 times
+ Larry is there
+ There are 2 Brick Walls
+ Wall => [Brick Brick Larry]
+ Smith => [John]
+ Dog => []
+
+=head2 The find_dup() Method
+
+ $status = $X->find_dup($key, $value) ;
+
+This method checks for the existence of a specific key/value pair. If the
+pair exists, the cursor is left pointing to the pair and the method
+returns 0. Otherwise the method returns a non-zero value.
+
+Assuming the database from the previous example:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is there
+ Harry Wall is not there
+
+
+=head2 The del_dup() Method
+
+ $status = $X->del_dup($key, $value) ;
+
+This method deletes a specific key/value pair. It returns
+0 if the pair exists and has been deleted successfully.
+Otherwise the method returns a non-zero value.
+
+Again assuming the existence of the C<tree> database
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+prints this
+
+ Larry Wall is not there
+
+=head2 Matching Partial Keys
+
+The BTREE interface has a feature which allows partial keys to be
+matched. This functionality is I<only> available when the C<seq> method
+is used along with the R_CURSOR flag.
+
+ $x->seq($key, $value, R_CURSOR) ;
+
+Here is the relevant quote from the dbopen man page where it defines
+the use of the R_CURSOR flag with seq:
+
+ Note, for the DB_BTREE access method, the returned key is not
+ necessarily an exact match for the specified key. The returned key
+ is the smallest key greater than or equal to the specified key,
+ permitting partial key matches and range searches.
+
+In the example script below, the C<match> sub uses this feature to find
+and print the first matching key/value pair given a partial key.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw($filename $x %h $st $key $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+Here is the output:
+
+ IN ORDER
+ Smith -> John
+ Wall -> Larry
+ Walls -> Brick
+ mouse -> mickey
+
+ PARTIAL MATCH
+ Wa -> Wall -> Larry
+ A -> Smith -> John
+ a -> mouse -> mickey
+
+=head1 DB_RECNO
+
+DB_RECNO provides an interface to flat text files. Both variable and
+fixed length records are supported.
+
+In order to make RECNO more compatible with Perl, the array offset for
+all RECNO arrays begins at 0 rather than 1 as in Berkeley DB.
+
+As with normal Perl arrays, a RECNO array can be accessed using
+negative indexes. The index -1 refers to the last element of the array,
+-2 the second last, and so on. Attempting to access an element before
+the start of the array will raise a fatal run-time error.
+
+=head2 The 'bval' Option
+
+The operation of the bval option warrants some discussion. Here is the
+definition of bval from the Berkeley DB 1.85 recno manual page:
+
+ The delimiting byte to be used to mark the end of a
+ record for variable-length records, and the pad charac-
+ ter for fixed-length records. If no value is speci-
+ fied, newlines (``\n'') are used to mark the end of
+ variable-length records and fixed-length records are
+ padded with spaces.
+
+The second sentence is wrong. In actual fact bval will only default to
+C<"\n"> when the openinfo parameter in dbopen is NULL. If a non-NULL
+openinfo parameter is used at all, the value that happens to be in bval
+will be used. That means you always have to specify bval when making
+use of any of the options in the openinfo parameter. This documentation
+error will be fixed in the next release of Berkeley DB.
+
+That clarifies the situation with regard to Berkeley DB itself. What
+about B<DB_File>? Well, the behavior defined in the quote above is
+quite useful, so B<DB_File> conforms to it.
+
+That means that you can specify other options (e.g. cachesize) and
+still have bval default to C<"\n"> for variable length records, and
+space for fixed length records.
+
+Also note that the bval option only allows you to specify a single byte
+as a delimiter.
+
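+As an illustration, here is a sketch of a RECNO file that uses a pipe
+character, rather than the default newline, as the record delimiter
+(the file name F<pipes> is just an example):
+
+ use DB_File ;
+
+ my @recs ;
+ $DB_RECNO->{'bval'} = "|" ;    # single-byte record delimiter
+ tie @recs, 'DB_File', "pipes", O_RDWR|O_CREAT, 0666, $DB_RECNO
+     or die "Cannot open file 'pipes': $!\n" ;
+
+ $recs[0] = "first record" ;
+ $recs[1] = "second record" ;
+ untie @recs ;
+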
+=head2 A Simple Example
+
+Here is a simple example that uses RECNO (if you are using a version
+of Perl earlier than 5.004_57 this example won't work -- see
+L<Extra RECNO Methods> for a workaround).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ push @h, "green", "black" ;
+
+ my $elements = scalar @h ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = pop @h ;
+ print "popped $last\n" ;
+
+ unshift @h, "white" ;
+ my $first = shift @h ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ untie @h ;
+
+Here is the output from the script:
+
+ The array contains 5 entries
+ popped black
+ shifted white
+ Element 1 Exists with value blue
+ The last element is green
+ The 2nd last element is yellow
+
+=head2 Extra RECNO Methods
+
+If you are using a version of Perl earlier than 5.004_57, the tied
+array interface is quite limited. In the example script above,
+C<push>, C<pop>, C<shift>, C<unshift> and determining the array length
+will not work with a tied array.
+
+To make the interface more useful for older versions of Perl, a number
+of methods are supplied with B<DB_File> to simulate the missing array
+operations. All these methods are accessed via the object returned from
+the tie call.
+
+Here are the methods:
+
+=over 5
+
+=item B<$X-E<gt>push(list) ;>
+
+Pushes the elements of C<list> to the end of the array.
+
+=item B<$value = $X-E<gt>pop ;>
+
+Removes and returns the last element of the array.
+
+=item B<$X-E<gt>shift>
+
+Removes and returns the first element of the array.
+
+=item B<$X-E<gt>unshift(list) ;>
+
+Pushes the elements of C<list> to the start of the array.
+
+=item B<$X-E<gt>length>
+
+Returns the number of elements in the array.
+
+=item B<$X-E<gt>splice(offset, length, elements);>
+
+Returns a splice of the array.
+
+=back
+
+=head2 Another Example
+
+Here is a more complete example that makes use of some of the methods
+described above. It also makes use of the API interface directly (see
+L<THE API INTERFACE>).
+
+ use warnings ;
+ use strict ;
+ use vars qw(@h $H $file $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0666, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+and this is what it outputs:
+
+ ORIGINAL
+ 0: zero
+ 1: one
+ 2: two
+ 3: three
+ 4: four
+
+ The last record was [four]
+ The first record was [zero]
+
+ REVERSE
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+ REVERSE again
+ 5: last
+ 4: three
+ 3: Newbie
+ 2: one
+ 1: New One
+ 0: first
+
+Notes:
+
+=over 5
+
+=item 1.
+
+Rather than iterating through the array C<@h> like this:
+
+ foreach $i (@h)
+
+it is necessary to use either this:
+
+ foreach $i (0 .. $H->length - 1)
+
+or this:
+
+ for ($a = $H->seq($k, $v, R_FIRST) ;
+      $a == 0 ;
+      $a = $H->seq($k, $v, R_NEXT) )
+
+=item 2.
+
+Notice that both times the C<put> method was used the record index was
+specified using a variable, C<$i>, rather than the literal value
+itself. This is because C<put> will return the record number of the
+inserted line via that parameter.
+
+=back
+
+=head1 THE API INTERFACE
+
+As well as accessing Berkeley DB using a tied hash or array, it is also
+possible to make direct use of most of the API functions defined in the
+Berkeley DB documentation.
+
+To do this you need to store a copy of the object returned from the tie.
+
+ $db = tie %hash, "DB_File", "filename" ;
+
+Once you have done that, you can access the Berkeley DB API functions
+as B<DB_File> methods directly like this:
+
+ $db->put($key, $value, R_NOOVERWRITE) ;
+
+B<Important:> If you have saved a copy of the object returned from
+C<tie>, the underlying database file will I<not> be closed until both
+the tied variable is untied and all copies of the saved object are
+destroyed.
+
+ use DB_File ;
+ $db = tie %hash, "DB_File", "filename"
+ or die "Cannot tie filename: $!" ;
+ ...
+ undef $db ;
+ untie %hash ;
+
+See L<The untie() Gotcha> for more details.
+
+All the functions defined in L<dbopen> are available except for
+close() and dbopen() itself. The B<DB_File> method interface to the
+supported functions has been implemented to mirror the way Berkeley DB
+works whenever possible. In particular note that:
+
+=over 5
+
+=item *
+
+The methods return a status value. All return 0 on success.
+All return -1 to signify an error and set C<$!> to the exact
+error code. The return code 1 generally (but not always) means that the
+key specified did not exist in the database.
+
+Other return codes are defined. See below and in the Berkeley DB
+documentation for details. The Berkeley DB documentation should be used
+as the definitive source.
+
+=item *
+
+Whenever a Berkeley DB function returns data via one of its parameters,
+the equivalent B<DB_File> method does exactly the same.
+
+=item *
+
+If you are careful, it is possible to mix API calls with the tied
+hash/array interface in the same piece of code. Although only a few of
+the methods used to implement the tied interface currently make use of
+the cursor, you should always assume that the cursor has been changed
+any time the tied hash/array interface is used. As an example, this
+code will probably not do what you expect:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the second key/value pair.
+ # oops, it didn't, it got the last key/value pair!
+ $X->seq($key, $value, R_NEXT) ;
+
+The code above can be rearranged to get around the problem, like this:
+
+ $X = tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0777, $DB_BTREE
+ or die "Cannot tie $filename: $!" ;
+
+ # this line will modify the cursor
+ $count = scalar keys %x ;
+
+ # Get the first key/value pair and set the cursor
+ $X->seq($key, $value, R_FIRST) ;
+
+ # Get the second key/value pair.
+ # worked this time.
+ $X->seq($key, $value, R_NEXT) ;
+
+=back
+
+All the constants defined in L<dbopen> for use in the flags parameters
+in the methods defined below are also available. Refer to the Berkeley
+DB documentation for the precise meaning of the flags values.
+
+Below is a list of the methods available.
+
+=over 5
+
+=item B<$status = $X-E<gt>get($key, $value [, $flags]) ;>
+
+Given a key (C<$key>) this method reads the value associated with it
+from the database. The value read from the database is returned in the
+C<$value> parameter.
+
+If the key does not exist the method returns 1.
+
+No flags are currently defined for this method.
+
+=item B<$status = $X-E<gt>put($key, $value [, $flags]) ;>
+
+Stores the key/value pair in the database.
+
+If you use either the R_IAFTER or R_IBEFORE flags, the C<$key> parameter
+will have the record number of the inserted key/value pair set.
+
+Valid flags are R_CURSOR, R_IAFTER, R_IBEFORE, R_NOOVERWRITE and
+R_SETCURSOR.
+
+=item B<$status = $X-E<gt>del($key [, $flags]) ;>
+
+Removes all key/value pairs with key C<$key> from the database.
+
+A return code of 1 means that the requested key was not in the
+database.
+
+R_CURSOR is the only valid flag at present.
+
+=item B<$status = $X-E<gt>fd ;>
+
+Returns the file descriptor for the underlying database.
+
+See L<Locking: The Trouble with fd> for an explanation of why you should
+not use C<fd> to lock your database.
+
+=item B<$status = $X-E<gt>seq($key, $value, $flags) ;>
+
+This interface allows sequential retrieval from the database. See
+L<dbopen> for full details.
+
+Both the C<$key> and C<$value> parameters will be set to the key/value
+pair read from the database.
+
+The flags parameter is mandatory. The valid flag values are R_CURSOR,
+R_FIRST, R_LAST, R_NEXT and R_PREV.
+
+=item B<$status = $X-E<gt>sync([$flags]) ;>
+
+Flushes any cached buffers to disk.
+
+R_RECNOSYNC is the only valid flag at present.
+
+=back
+
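+Putting the return code conventions above into practice, a typical
+error-checking pattern looks like this (a sketch that assumes C<$db>
+holds the object returned from C<tie> and that the key name is just an
+example):
+
+ my $value ;
+ my $status = $db->get("somekey", $value) ;
+
+ if ($status == 0)
+     { print "found: $value\n" }
+ elsif ($status == 1)
+     { print "'somekey' is not in the database\n" }
+ else
+     { die "get failed: $!\n" }    # -1: error code left in $!
+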
+=head1 DBM FILTERS
+
+A DBM Filter is a piece of code that is used when you I<always>
+want to make the same transformation to all keys and/or values in a
+DBM database.
+
+There are four methods associated with DBM Filters. All work identically,
+and each is used to install (or uninstall) a single DBM Filter. Each
+expects a single parameter, namely a reference to a sub. The only
+difference between them is the place that the filter is installed.
+
+To summarise:
+
+=over 5
+
+=item B<filter_store_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a key to a DBM database.
+
+=item B<filter_store_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you write a value to a DBM database.
+
+
+=item B<filter_fetch_key>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a key from a DBM database.
+
+=item B<filter_fetch_value>
+
+If a filter has been installed with this method, it will be invoked
+every time you read a value from a DBM database.
+
+=back
+
+You can use any combination of the methods, from none, to all four.
+
+All filter methods return the existing filter, if present, or C<undef>
+if not.
+
+To delete a filter pass C<undef> to it.
+
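+For example, to remove a previously installed store-key filter:
+
+ $db->filter_store_key(undef) ;
+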
+=head2 The Filter
+
+When each filter is called by Perl, a local copy of C<$_> will contain
+the key or value to be filtered. Filtering is achieved by modifying
+the contents of C<$_>. The return code from the filter is ignored.
+
+=head2 An Example -- the NULL termination problem.
+
+Consider the following scenario. You have a DBM database
+that you need to share with a third-party C application. The C application
+assumes that I<all> keys and values are NULL terminated. Unfortunately
+when Perl writes to DBM databases it doesn't use NULL termination, so
+your Perl application will have to manage NULL termination itself. When
+you write to the database you will have to use something like this:
+
+ $hash{"$key\0"} = "$value\0" ;
+
+Similarly the NULL needs to be taken into account when you are considering
+the length of existing keys/values.
+
+It would be much better if you could ignore the NULL termination issue
+in the main application code and have a mechanism that automatically
+added the terminating NULL to all keys and values whenever you write to
+the database and have them removed when you read from the database. As I'm
+sure you have already guessed, this is a problem that DBM Filters can
+fix very easily.
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ # Install DBM Filters
+ $db->filter_fetch_key ( sub { s/\0$// } ) ;
+ $db->filter_store_key ( sub { $_ .= "\0" } ) ;
+ $db->filter_fetch_value( sub { s/\0$// } ) ;
+ $db->filter_store_value( sub { $_ .= "\0" } ) ;
+
+ $hash{"abc"} = "def" ;
+ my $a = $hash{"ABC"} ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+Hopefully the contents of each of the filters should be
+self-explanatory. Both "fetch" filters remove the terminating NULL,
+and both "store" filters add a terminating NULL.
+
+
+=head2 Another Example -- Key is a C int.
+
+Here is another real-life example. By default, whenever Perl writes to
+a DBM database it always writes the key and value as strings. So when
+you use this:
+
+ $hash{12345} = "something" ;
+
+the key 12345 will get stored in the DBM database as the 5 byte string
+"12345". If you actually want the key to be stored in the DBM database
+as a C int, you will have to use C<pack> when writing, and C<unpack>
+when reading.
+
+Here is a DBM Filter that does it:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my %hash ;
+ my $filename = "/tmp/filt" ;
+ unlink $filename ;
+
+
+ my $db = tie %hash, 'DB_File', $filename, O_CREAT|O_RDWR, 0666, $DB_HASH
+ or die "Cannot open $filename: $!\n" ;
+
+ $db->filter_fetch_key ( sub { $_ = unpack("i", $_) } ) ;
+ $db->filter_store_key ( sub { $_ = pack ("i", $_) } ) ;
+ $hash{123} = "def" ;
+ # ...
+ undef $db ;
+ untie %hash ;
+
+This time only two filters have been used -- we only need to manipulate
+the contents of the key, so it wasn't necessary to install any value
+filters.
+
+=head1 HINTS AND TIPS
+
+
+=head2 Locking: The Trouble with fd
+
+Until version 1.72 of this module, the recommended technique for locking
+B<DB_File> databases was to flock the filehandle returned from the "fd"
+function. Unfortunately this technique has been shown to be fundamentally
+flawed (Kudos to David Harris for tracking this down). Use it at your own
+peril!
+
+The locking technique went like this.
+
+ $db = tie(%db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666)
+ || die "dbcreat /tmp/foo.db $!";
+ $fd = $db->fd;
+ open(DB_FH, "+<&=$fd") || die "dup $!";
+ flock (DB_FH, LOCK_EX) || die "flock: $!";
+ ...
+ $db{"Tom"} = "Jerry" ;
+ ...
+ flock(DB_FH, LOCK_UN);
+ undef $db;
+ untie %db;
+ close(DB_FH);
+
+In simple terms, this is what happens:
+
+=over 5
+
+=item 1.
+
+Use "tie" to open the database.
+
+=item 2.
+
+Lock the database with fd & flock.
+
+=item 3.
+
+Read & Write to the database.
+
+=item 4.
+
+Unlock and close the database.
+
+=back
+
+Here is the crux of the problem. A side-effect of opening the B<DB_File>
+database in step 1 is that an initial block from the database will get
+read from disk and cached in memory.
+
+To see why this is a problem, consider what can happen when two processes,
+say "A" and "B", both want to update the same B<DB_File> database
+using the locking steps outlined above. Assume process "A" has already
+opened the database and has a write lock, but it hasn't actually updated
+the database yet (it has finished step 2, but not started step 3 yet). Now
+process "B" tries to open the same database - step 1 will succeed,
+but it will block on step 2 until process "A" releases the lock. The
+important thing to notice here is that at this point in time both
+processes will have cached identical initial blocks from the database.
+
+Now process "A" updates the database and happens to change some of the
+data held in the initial buffer. Process "A" terminates, flushing
+all cached data to disk and releasing the database lock. At this point
+the database on disk will correctly reflect the changes made by process
+"A".
+
+With the lock released, process "B" can now continue. It also updates the
+database and unfortunately it too modifies the data that was in its
+initial buffer. Once that data gets flushed to disk it will overwrite
+some/all of the changes process "A" made to the database.
+
+The result of this scenario is at best a database that doesn't contain
+what you expect. At worst the database will be corrupted.
+
+The above won't happen every time competing processes update the same
+B<DB_File> database, but it does illustrate why the technique should
+not be used.
+
+=head2 Safe ways to lock a database
+
+Starting with version 2.x, Berkeley DB has internal support for locking.
+The companion module to this one, B<BerkeleyDB>, provides an interface
+to this locking functionality. If you are serious about locking
+Berkeley DB databases, I strongly recommend using B<BerkeleyDB>.
+
+If using B<BerkeleyDB> isn't an option, there are a number of modules
+available on CPAN that can be used to implement locking. Each one
+implements locking differently and has different goals in mind. It is
+therefore worth knowing the difference, so that you can pick the right
+one for your application. Here are the three locking wrappers:
+
+=over 5
+
+=item B<Tie::DB_Lock>
+
+A B<DB_File> wrapper which creates copies of the database file for
+read access, so that you have a kind of multiversioning concurrent read
+system. However, updates are still serial. Use for databases where reads
+may be lengthy and consistency problems may occur.
+
+=item B<Tie::DB_LockFile>
+
+A B<DB_File> wrapper that has the ability to lock and unlock the database
+while it is being used. Avoids the tie-before-flock problem by simply
+re-tie-ing the database when you get or drop a lock. Because of the
+flexibility in dropping and re-acquiring the lock in the middle of a
+session, this can be massaged into a system that will work with long
+updates and/or reads if the application follows the hints in the POD
+documentation.
+
+=item B<DB_File::Lock>
+
+An extremely lightweight B<DB_File> wrapper that simply flocks a lockfile
+before tie-ing the database and drops the lock after the untie. Allows
+one to use the same lockfile for multiple databases to avoid deadlock
+problems, if desired. Use for databases where updates and reads are
+quick and simple flock locking semantics are enough.
+
+=back
+
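+If you would rather hand-roll the approach that B<DB_File::Lock> takes,
+the essential points are to flock a I<separate> lock file B<before> the
+tie, and to untie (so that cached data is flushed) before the lock is
+released. Here is a minimal sketch (the file names are only examples):
+
+ use DB_File ;
+ use Fcntl qw(:flock) ;
+
+ my %db ;
+
+ # take the lock first, on a file that is not the database itself
+ open (LOCK, ">/tmp/foo.lock") or die "Cannot open lockfile: $!" ;
+ flock (LOCK, LOCK_EX)         or die "flock: $!" ;
+
+ # only now is it safe to tie (and so cache) the database
+ tie %db, 'DB_File', '/tmp/foo.db', O_CREAT|O_RDWR, 0666
+     or die "Cannot tie /tmp/foo.db: $!" ;
+
+ $db{"Tom"} = "Jerry" ;
+
+ untie %db ;                # flush and close the database first
+ flock (LOCK, LOCK_UN) ;    # then drop the lock
+ close (LOCK) ;
+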
+=head2 Sharing Databases With C Applications
+
+There is no technical reason why a Berkeley DB database cannot be
+shared by both a Perl and a C application.
+
+The vast majority of problems that are reported in this area boil down
+to the fact that C strings are NULL terminated, whilst Perl strings are
+not. See L<DBM FILTERS> for a generic way to work around this problem.
+
+Here is a real example. Netscape 2.0 keeps a record of the locations you
+visit along with the time you last visited them in a DB_HASH database.
+This is usually stored in the file F<~/.netscape/history.db>. The key
+field in the database is the location string and the value field is the
+time the location was last visited stored as a 4 byte binary value.
+
+If you haven't already guessed, the location string is stored with a
+terminating NULL. This means you need to be careful when accessing the
+database.
+
+Here is a snippet of code that is loosely based on Tom Christiansen's
+I<ggh> script (available from your nearest CPAN archive in
+F<authors/id/TOMC/scripts/nshist.gz>).
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw( $dotdir $HISTORY %hist_db $href $binary_time $date ) ;
+ $dotdir = $ENV{HOME} || $ENV{LOGNAME};
+
+ $HISTORY = "$dotdir/.netscape/history.db";
+
+ tie %hist_db, 'DB_File', $HISTORY
+    or die "Cannot open $HISTORY: $!\n" ;
+
+ # Dump the complete database
+ while ( ($href, $binary_time) = each %hist_db ) {
+
+ # remove the terminating NULL
+ $href =~ s/\x00$// ;
+
+ # convert the binary time into a user friendly string
+ $date = localtime unpack("V", $binary_time);
+ print "$date $href\n" ;
+ }
+
+ # check for the existence of a specific key
+ # remember to add the NULL
+ if ( $binary_time = $hist_db{"http://mox.perl.com/\x00"} ) {
+ $date = localtime unpack("V", $binary_time) ;
+ print "Last visited mox.perl.com on $date\n" ;
+ }
+ else {
+ print "Never visited mox.perl.com\n"
+ }
+
+ untie %hist_db ;
+
+=head2 The untie() Gotcha
+
+If you make use of the Berkeley DB API, it is I<very> strongly
+recommended that you read L<perltie/The untie Gotcha>.
+
+Even if you don't currently make use of the API interface, it is still
+worth reading it.
+
+Here is an example which illustrates the problem from a B<DB_File>
+perspective:
+
+ use DB_File ;
+ use Fcntl ;
+
+ my %x ;
+ my $X ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_TRUNC
+ or die "Cannot tie first time: $!" ;
+
+ $x{123} = 456 ;
+
+ untie %x ;
+
+ tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ or die "Cannot tie second time: $!" ;
+
+ untie %x ;
+
+When run, the script will produce this error message:
+
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+Although the error message above refers to the second tie() statement
+in the script, the source of the problem is really with the untie()
+statement that precedes it.
+
+Having read L<perltie> you will probably have already guessed that the
+error is caused by the extra copy of the tied object stored in C<$X>.
+If you haven't, then the problem boils down to the fact that the
+B<DB_File> destructor, DESTROY, will not be called until I<all>
+references to the tied object are destroyed. Both the tied variable,
+C<%x>, and C<$X> above hold a reference to the object. The call to
+untie() will destroy the first, but C<$X> still holds a valid
+reference, so the destructor will not get called and the database file
+F<tst.fil> will remain open. The fact that Berkeley DB then reports the
+attempt to open a database that is already open via the catch-all
+"Invalid argument" doesn't help.
+
+If you run the script with the C<-w> flag the error message becomes:
+
+ untie attempted while 1 inner references still exist at bad.file line 12.
+ Cannot tie second time: Invalid argument at bad.file line 14.
+
+which pinpoints the real problem. Finally the script can now be
+modified to fix the original problem by destroying the API object
+before the untie:
+
+ ...
+ $x{123} = 456 ;
+
+ undef $X ;
+ untie %x ;
+
+ $X = tie %x, 'DB_File', 'tst.fil' , O_RDWR|O_CREAT
+ ...
+
+
+=head1 COMMON QUESTIONS
+
+=head2 Why is there Perl source in my database?
+
+If you look at the contents of a database file created by DB_File,
+there can sometimes be part of a Perl script included in it.
+
+This happens because Berkeley DB uses dynamic memory to allocate
+buffers which will subsequently be written to the database file. Being
+dynamic, the memory could have been used for anything before DB
+malloced it. As Berkeley DB doesn't clear the memory once it has been
+allocated, the unused portions will contain random junk. In the case
+where a Perl script gets written to the database, the random junk will
+correspond to an area of dynamic memory that happened to be used during
+the compilation of the script.
+
+Unless you object to the possibility of having fragments of your Perl
+scripts embedded in a database file, this is nothing to worry about.
+
+=head2 How do I store complex data structures with DB_File?
+
+Although B<DB_File> cannot do this directly, there is a module which
+can layer transparently over B<DB_File> to accomplish this feat.
+
+Check out the MLDBM module, available on CPAN in the directory
+F<modules/by-module/MLDBM>.
+
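+Here is a sketch of how B<MLDBM> is typically layered over B<DB_File>
+(the file name is just an example - check the B<MLDBM> documentation
+for the definitive interface):
+
+ use MLDBM qw(DB_File Storable) ;  # DB_File storage, Storable serialiser
+ use Fcntl ;
+
+ my %h ;
+ tie %h, 'MLDBM', 'complex.db', O_CREAT|O_RDWR, 0666
+     or die "Cannot open complex.db: $!\n" ;
+
+ # store a nested data structure
+ $h{'config'} = { colour => 'red', sizes => [1, 2, 3] } ;
+
+ # modify a copy and store it back - in-place changes to nested
+ # elements are not written through to the database
+ my $tmp = $h{'config'} ;
+ $tmp->{'colour'} = 'blue' ;
+ $h{'config'} = $tmp ;
+
+ untie %h ;
+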
+=head2 What does "Invalid Argument" mean?
+
+You will get this error message when one of the parameters in the
+C<tie> call is wrong. Unfortunately there are quite a few parameters to
+get wrong, so it can be difficult to figure out which one it is.
+
+Here are a couple of possibilities:
+
+=over 5
+
+=item 1.
+
+Attempting to reopen a database without closing it.
+
+=item 2.
+
+Using the O_WRONLY flag.
+
+=back
+
+=head2 What does "Bareword 'DB_File' not allowed" mean?
+
+You will encounter this particular error message when you have the
+C<strict 'subs'> pragma (or the full strict pragma) in your script.
+Consider this script:
+
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ use vars qw(%x) ;
+ tie %x, DB_File, "filename" ;
+
+Running it produces the error in question:
+
+ Bareword "DB_File" not allowed while "strict subs" in use
+
+To get around the error, place the word C<DB_File> in either single or
+double quotes, like this:
+
+ tie %x, "DB_File", "filename" ;
+
+Although it might seem like a real pain, it is really worth the effort
+of having a C<use strict> in all your scripts.
+
+=head1 REFERENCES
+
+Articles that are either about B<DB_File> or make use of it.
+
+=over 5
+
+=item 1.
+
+I<Full-Text Searching in Perl>, Tim Kientzle (tkientzle@ddj.com),
+Dr. Dobb's Journal, Issue 295, January 1999, pp 34-41
+
+=back
+
+=head1 HISTORY
+
+Moved to the Changes file.
+
+=head1 BUGS
+
+Some older versions of Berkeley DB had problems with fixed length
+records using the RECNO file format. This problem has been fixed since
+version 1.85 of Berkeley DB.
+
+I am sure there are bugs in the code. If you do find any, or can
+suggest any enhancements, I would welcome your comments.
+
+=head1 AVAILABILITY
+
+B<DB_File> comes with the standard Perl source distribution. Look in
+the directory F<ext/DB_File>. Given the amount of time between releases
+of Perl the version that ships with Perl is quite likely to be out of
+date, so the most recent version can always be found on CPAN (see
+L<perlmod/CPAN> for details), in the directory
+F<modules/by-module/DB_File>.
+
+This version of B<DB_File> will work with either version 1.x, 2.x or
+3.x of Berkeley DB, but is limited to the functionality provided by
+version 1.
+
+The official web site for Berkeley DB is F<http://www.sleepycat.com>.
+All versions of Berkeley DB are available there.
+
+Alternatively, Berkeley DB version 1 is available at your nearest CPAN
+archive in F<src/misc/db.1.85.tar.gz>.
+
+If you are running IRIX, then get Berkeley DB version 1 from
+F<http://reality.sgi.com/ariel>. It has the patches necessary to
+compile properly on IRIX 5.3.
+
+=head1 COPYRIGHT
+
+Copyright (c) 1995-2001 Paul Marquess. All rights reserved. This program
+is free software; you can redistribute it and/or modify it under the
+same terms as Perl itself.
+
+Although B<DB_File> is covered by the Perl license, the library it
+makes use of, namely Berkeley DB, is not. Berkeley DB has its own
+copyright and its own license. Please take the time to read it.
+
+Here are a few words taken from the Berkeley DB FAQ (at
+F<http://www.sleepycat.com>) regarding the license:
+
+ Do I have to license DB to use it in Perl scripts?
+
+ No. The Berkeley DB license requires that software that uses
+ Berkeley DB be freely redistributable. In the case of Perl, that
+ software is Perl, and not your scripts. Any Perl scripts that you
+ write are your property, including scripts that make use of
+ Berkeley DB. Neither the Perl license nor the Berkeley DB license
+ place any restriction on what you may do with them.
+
+If you are in any doubt about the license situation, contact either the
+Berkeley DB authors or the author of DB_File. See L<"AUTHOR"> for details.
+
+
+=head1 SEE ALSO
+
+L<perl(1)>, L<dbopen(3)>, L<hash(3)>, L<recno(3)>, L<btree(3)>,
+L<dbmfilter>
+
+=head1 AUTHOR
+
+The DB_File interface was written by Paul Marquess
+E<lt>Paul.Marquess@btinternet.comE<gt>.
+Questions about the DB system itself may be addressed to
+E<lt>db@sleepycat.comE<gt>.
+
+=cut
diff --git a/db/perl/DB_File/DB_File.xs b/db/perl/DB_File/DB_File.xs
new file mode 100644
index 000000000..d2dc572ab
--- /dev/null
+++ b/db/perl/DB_File/DB_File.xs
@@ -0,0 +1,2122 @@
+/*
+
+ DB_File.xs -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 30th July 2001
+ version 1.78
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 0.1 - Initial Release
+ 0.2 - No longer bombs out if dbopen returns an error.
+ 0.3 - Added some support for multiple btree compares
+ 1.0 - Complete support for multiple callbacks added.
+ Fixed a problem with pushing a value onto an empty list.
+ 1.01 - Fixed a SunOS core dump problem.
+ The return value from TIEHASH wasn't set to NULL when
+ dbopen returned an error.
+ 1.02 - Use ALIAS to define TIEARRAY.
+ Removed some redundant commented code.
+ Merged OS2 code into the main distribution.
+ Allow negative subscripts with RECNO interface.
+ Changed the default flags to O_CREAT|O_RDWR
+ 1.03 - Added EXISTS
+ 1.04 - fixed a couple of bugs in hash_cb. Patches supplied by
+ Dave Hammen, hammen@gothamcity.jsc.nasa.gov
+ 1.05 - Added logic to allow prefix & hash types to be specified via
+ Makefile.PL
+ 1.06 - Minor namespace cleanup: Localized PrintBtree.
+ 1.07 - Fixed bug with RECNO, where bval wasn't defaulting to "\n".
+ 1.08 - No change to DB_File.xs
+ 1.09 - Default mode for dbopen changed to 0666
+ 1.10 - Fixed fd method so that it still returns -1 for
+ in-memory files when db 1.86 is used.
+ 1.11 - No change to DB_File.xs
+ 1.12 - No change to DB_File.xs
+ 1.13 - Tidied up a few casts.
+ 1.14 - Made it illegal to tie an associative array to a RECNO
+ database and an ordinary array to a HASH or BTREE database.
+ 1.50 - Make work with both DB 1.x or DB 2.x
+ 1.51 - Fixed a bug in mapping 1.x O_RDONLY flag to 2.x DB_RDONLY equivalent
+ 1.52 - Patch from Gisle Aas <gisle@aas.no> to suppress "use of
+ undefined value" warning with db_get and db_seq.
+ 1.53 - Added DB_RENUMBER to flags for recno.
+ 1.54 - Fixed bug in the fd method
+ 1.55 - Fix for AIX from Jarkko Hietaniemi
+ 1.56 - No change to DB_File.xs
+ 1.57 - added the #undef op to allow building with Threads support.
+ 1.58 - Fixed a problem with the use of sv_setpvn. When the
+ size is specified as 0, it does a strlen on the data.
+ This was ok for DB 1.x, but isn't for DB 2.x.
+ 1.59 - No change to DB_File.xs
+ 1.60 - Some code tidy up
+ 1.61 - added flagSet macro for DB 2.5.x
+ fixed typo in O_RDONLY test.
+ 1.62 - No change to DB_File.xs
+        1.63 -  Fix to allow DB 2.6.x to build.
+ 1.64 - Tidied up the 1.x to 2.x flags mapping code.
+ Added a patch from Mark Kettenis <kettenis@wins.uva.nl>
+ to fix a flag mapping problem with O_RDONLY on the Hurd
+ 1.65 - Fixed a bug in the PUSH logic.
+		Added BOOT check that version 2.3.4 or greater is in use
+ 1.66 - Added DBM filter code
+ 1.67 - Backed off the use of newSVpvn.
+ Fixed DBM Filter code for Perl 5.004.
+ Fixed a small memory leak in the filter code.
+        1.68 -  fixed backward compatibility bug with R_IAFTER & R_IBEFORE
+ merged in the 5.005_58 changes
+ 1.69 - fixed a bug in push -- DB_APPEND wasn't working properly.
+ Fixed the R_SETCURSOR bug introduced in 1.68
+ Added a new Perl variable $DB_File::db_ver
+ 1.70 - Initialise $DB_File::db_ver and $DB_File::db_version with
+ GV_ADD|GV_ADDMULT -- bug spotted by Nick Ing-Simmons.
+ Added a BOOT check to test for equivalent versions of db.h &
+ libdb.a/so.
+ 1.71 - Support for Berkeley DB version 3.
+		Support for Berkeley DB 2/3's backward compatibility mode.
+ Rewrote push
+ 1.72 - No change to DB_File.xs
+ 1.73 - No change to DB_File.xs
+        1.74 -  A call to open needed to be parenthesised to stop it
+		clashing with a win32 macro.
+ Added Perl core patches 7703 & 7801.
+ 1.75 - Fixed Perl core patch 7703.
+		Added support to allow DB_File to be built with
+ Berkeley DB 3.2 -- btree_compare, btree_prefix and hash_cb
+ needed to be changed.
+ 1.76 - No change to DB_File.xs
+ 1.77 - Tidied up a few types used in calling newSVpvn.
+ 1.78 - Core patch 10335, 10372, 10534, 10549, 11051 included.
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#ifndef PERL_VERSION
+# include "patchlevel.h"
+# define PERL_REVISION 5
+# define PERL_VERSION PATCHLEVEL
+# define PERL_SUBVERSION SUBVERSION
+#endif
+
+#if PERL_REVISION == 5 && (PERL_VERSION < 4 || (PERL_VERSION == 4 && PERL_SUBVERSION <= 75 ))
+
+# define PL_sv_undef sv_undef
+# define PL_na na
+
+#endif
+
+/* DEFSV appears first in 5.004_56 */
+#ifndef DEFSV
+# define DEFSV GvSV(defgv)
+#endif
+
+/* Mention DB_VERSION_MAJOR_CFG, DB_VERSION_MINOR_CFG, and
+ DB_VERSION_PATCH_CFG here so that Configure pulls them all in. */
+
+/* Because we are using Berkeley DB, we prefer the <sys/cdefs.h>
+ * definition of __attribute__ (which will shortly be #included by
+ * <db.h>) to any __attribute__ that may already be defined, for
+ * example by GNUC or by Perl. */
+
+/* #if DB_VERSION_MAJOR_CFG < 2 */
+#ifndef DB_VERSION_MAJOR
+# undef __attribute__
+#endif
+
+
+
+/* If Perl has been compiled with Threads support, the symbol op will
+ be defined here. This clashes with a field name in db.h, so get rid of it.
+ */
+#ifdef op
+# undef op
+#endif
+
+#ifdef COMPAT185
+# include <db_185.h>
+#else
+# include <db.h>
+#endif
+
+/* Wall starts with 5.7.x */
+
+#if PERL_REVISION > 5 || (PERL_REVISION == 5 && PERL_VERSION >= 7)
+
+/* Since we dropped the gccish definition of __attribute__ we will want
+ * to redefine dNOOP, however (so that dTHX continues to work). Yes,
+ * all this means that we can't do attribute checking on the DB_File,
+ * boo, hiss. */
+# ifndef DB_VERSION_MAJOR
+
+# undef dNOOP
+# define dNOOP extern int Perl___notused
+
+ /* Ditto for dXSARGS. */
+# undef dXSARGS
+# define dXSARGS \
+ dSP; dMARK; \
+ I32 ax = mark - PL_stack_base + 1; \
+ I32 items = sp - mark
+
+# endif
+
+/* avoid -Wall; DB_File xsubs never make use of `ix' setup for ALIASes */
+# undef dXSI32
+# define dXSI32 dNOOP
+
+#endif /* Perl >= 5.7 */
+
+#ifndef pTHX
+# define pTHX
+# define pTHX_
+# define aTHX
+# define aTHX_
+#endif
+
+#ifndef newSVpvn
+# define newSVpvn(a,b) newSVpv(a,b)
+#endif
+
+#include <fcntl.h>
+
+/* #define TRACE */
+#define DBM_FILTERING
+
+#ifdef TRACE
+# define Trace(x) printf x
+#else
+# define Trace(x)
+#endif
+
+
+#define DBT_clear(x) Zero(&x, 1, DBT) ;
+
+#ifdef DB_VERSION_MAJOR
+
+#if DB_VERSION_MAJOR == 2
+# define BERKELEY_DB_1_OR_2
+#endif
+
+#if DB_VERSION_MAJOR > 3 || (DB_VERSION_MAJOR == 3 && DB_VERSION_MINOR >= 2)
+# define AT_LEAST_DB_3_2
+#endif
+
+/* map version 2 features & constants onto their version 1 equivalent */
+
+#ifdef DB_Prefix_t
+# undef DB_Prefix_t
+#endif
+#define DB_Prefix_t size_t
+
+#ifdef DB_Hash_t
+# undef DB_Hash_t
+#endif
+#define DB_Hash_t u_int32_t
+
+/* DBTYPE stays the same */
+/* HASHINFO, RECNOINFO and BTREEINFO map to DB_INFO */
+#if DB_VERSION_MAJOR == 2
+ typedef DB_INFO INFO ;
+#else /* DB_VERSION_MAJOR > 2 */
+# define DB_FIXEDLEN (0x8000)
+#endif /* DB_VERSION_MAJOR == 2 */
+
+/* version 2 has db_recno_t in place of recno_t */
+typedef db_recno_t recno_t;
+
+
+#define R_CURSOR DB_SET_RANGE
+#define R_FIRST DB_FIRST
+#define R_IAFTER DB_AFTER
+#define R_IBEFORE DB_BEFORE
+#define R_LAST DB_LAST
+#define R_NEXT DB_NEXT
+#define R_NOOVERWRITE DB_NOOVERWRITE
+#define R_PREV DB_PREV
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define R_SETCURSOR 0x800000
+#else
+# define R_SETCURSOR (-100)
+#endif
+
+#define R_RECNOSYNC 0
+#define R_FIXEDLEN DB_FIXEDLEN
+#define R_DUP DB_DUP
+
+
+#define db_HA_hash h_hash
+#define db_HA_ffactor h_ffactor
+#define db_HA_nelem h_nelem
+#define db_HA_bsize db_pagesize
+#define db_HA_cachesize db_cachesize
+#define db_HA_lorder db_lorder
+
+#define db_BT_compare bt_compare
+#define db_BT_prefix bt_prefix
+#define db_BT_flags flags
+#define db_BT_psize db_pagesize
+#define db_BT_cachesize db_cachesize
+#define db_BT_lorder db_lorder
+#define db_BT_maxkeypage
+#define db_BT_minkeypage
+
+
+#define db_RE_reclen re_len
+#define db_RE_flags flags
+#define db_RE_bval re_pad
+#define db_RE_bfname re_source
+#define db_RE_psize db_pagesize
+#define db_RE_cachesize db_cachesize
+#define db_RE_lorder db_lorder
+
+#define TXN NULL,
+
+#define do_SEQ(db, key, value, flag) (db->cursor->c_get)(db->cursor, &key, &value, flag)
+
+
+#define DBT_flags(x) x.flags = 0
+#define DB_flags(x, v) x |= v
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 5
+# define flagSet(flags, bitmask) ((flags) & (bitmask))
+#else
+# define flagSet(flags, bitmask) (((flags) & DB_OPFLAGS_MASK) == (bitmask))
+#endif
+
+#else /* db version 1.x */
+
+#define BERKELEY_DB_1
+#define BERKELEY_DB_1_OR_2
+
+typedef union INFO {
+ HASHINFO hash ;
+ RECNOINFO recno ;
+ BTREEINFO btree ;
+ } INFO ;
+
+
+#ifdef mDB_Prefix_t
+# ifdef DB_Prefix_t
+# undef DB_Prefix_t
+# endif
+# define DB_Prefix_t mDB_Prefix_t
+#endif
+
+#ifdef mDB_Hash_t
+# ifdef DB_Hash_t
+# undef DB_Hash_t
+# endif
+# define DB_Hash_t mDB_Hash_t
+#endif
+
+#define db_HA_hash hash.hash
+#define db_HA_ffactor hash.ffactor
+#define db_HA_nelem hash.nelem
+#define db_HA_bsize hash.bsize
+#define db_HA_cachesize hash.cachesize
+#define db_HA_lorder hash.lorder
+
+#define db_BT_compare btree.compare
+#define db_BT_prefix btree.prefix
+#define db_BT_flags btree.flags
+#define db_BT_psize btree.psize
+#define db_BT_cachesize btree.cachesize
+#define db_BT_lorder btree.lorder
+#define db_BT_maxkeypage btree.maxkeypage
+#define db_BT_minkeypage btree.minkeypage
+
+#define db_RE_reclen recno.reclen
+#define db_RE_flags recno.flags
+#define db_RE_bval recno.bval
+#define db_RE_bfname recno.bfname
+#define db_RE_psize recno.psize
+#define db_RE_cachesize recno.cachesize
+#define db_RE_lorder recno.lorder
+
+#define TXN
+
+#define do_SEQ(db, key, value, flag) (db->dbp->seq)(db->dbp, &key, &value, flag)
+#define DBT_flags(x)
+#define DB_flags(x, v)
+#define flagSet(flags, bitmask) ((flags) & (bitmask))
+
+#endif /* db version 1 */
+
+
+
+#define db_DELETE(db, key, flags) ((db->dbp)->del)(db->dbp, TXN &key, flags)
+#define db_STORE(db, key, value, flags) ((db->dbp)->put)(db->dbp, TXN &key, &value, flags)
+#define db_FETCH(db, key, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#define db_sync(db, flags) ((db->dbp)->sync)(db->dbp, flags)
+#define db_get(db, key, value, flags) ((db->dbp)->get)(db->dbp, TXN &key, &value, flags)
+
+#ifdef DB_VERSION_MAJOR
+#define db_DESTROY(db) ( db->cursor->c_close(db->cursor),\
+ (db->dbp->close)(db->dbp, 0) )
+#define db_close(db) ((db->dbp)->close)(db->dbp, 0)
+#define db_del(db, key, flags) (flagSet(flags, R_CURSOR) \
+ ? ((db->cursor)->c_del)(db->cursor, 0) \
+ : ((db->dbp)->del)(db->dbp, NULL, &key, flags) )
+
+#else /* ! DB_VERSION_MAJOR */
+
+#define db_DESTROY(db) ((db->dbp)->close)(db->dbp)
+#define db_close(db) ((db->dbp)->close)(db->dbp)
+#define db_del(db, key, flags) ((db->dbp)->del)(db->dbp, &key, flags)
+#define db_put(db, key, value, flags) ((db->dbp)->put)(db->dbp, &key, &value, flags)
+
+#endif /* ! DB_VERSION_MAJOR */
+
+
+#define db_seq(db, key, value, flags) do_SEQ(db, key, value, flags)
+
+typedef struct {
+ DBTYPE type ;
+ DB * dbp ;
+ SV * compare ;
+ SV * prefix ;
+ SV * hash ;
+ int in_memory ;
+#ifdef BERKELEY_DB_1_OR_2
+ INFO info ;
+#endif
+#ifdef DB_VERSION_MAJOR
+ DBC * cursor ;
+#endif
+#ifdef DBM_FILTERING
+ SV * filter_fetch_key ;
+ SV * filter_store_key ;
+ SV * filter_fetch_value ;
+ SV * filter_store_value ;
+ int filtering ;
+#endif /* DBM_FILTERING */
+
+ } DB_File_type;
+
+typedef DB_File_type * DB_File ;
+typedef DBT DBTKEY ;
+
+#ifdef DBM_FILTERING
+
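+/* ckFilter runs the DBM filter stored in db->type (if there is one) on arg:
+   the filter sub sees the data in $_ and any change it makes to $_ is copied
+   back into arg. Recursive invocation of a filter croaks. */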
+#define ckFilter(arg,type,name) \
+ if (db->type) { \
+ SV * save_defsv ; \
+ /* printf("filtering %s\n", name) ;*/ \
+ if (db->filtering) \
+ croak("recursion detected in %s", name) ; \
+ db->filtering = TRUE ; \
+ save_defsv = newSVsv(DEFSV) ; \
+ sv_setsv(DEFSV, arg) ; \
+ PUSHMARK(sp) ; \
+ (void) perl_call_sv(db->type, G_DISCARD|G_NOARGS); \
+ sv_setsv(arg, DEFSV) ; \
+ sv_setsv(DEFSV, save_defsv) ; \
+ SvREFCNT_dec(save_defsv) ; \
+ db->filtering = FALSE ; \
+ /*printf("end of filtering %s\n", name) ;*/ \
+ }
+
+#else
+
+#define ckFilter(arg,type, name)
+
+#endif /* DBM_FILTERING */
+
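+/* sv_setpvn does a strlen on the data pointer when the size is 0 (see the
+   1.58 entry in the Changes list above), so substitute an empty string in
+   that case. */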
+#define my_sv_setpvn(sv, d, s) sv_setpvn(sv, (s ? d : (void*)""), s)
+
+#define OutputValue(arg, name) \
+ { if (RETVAL == 0) { \
+ my_sv_setpvn(arg, name.data, name.size) ; \
+ ckFilter(arg, filter_fetch_value,"filter_fetch_value") ; \
+ } \
+ }
+
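+/* OutputKey copies a key into arg; for RECNO databases the 1-based record
+   number is converted back to a 0-based array subscript. */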
+#define OutputKey(arg, name) \
+ { if (RETVAL == 0) \
+ { \
+ if (db->type != DB_RECNO) { \
+ my_sv_setpvn(arg, name.data, name.size); \
+ } \
+ else \
+ sv_setiv(arg, (I32)*(I32*)name.data - 1); \
+ ckFilter(arg, filter_fetch_key,"filter_fetch_key") ; \
+ } \
+ }
+
+
+#ifdef CAN_PROTOTYPE
+extern void __getBerkeleyDBInfo(void);
+#endif
+
+/* Internal Global Data */
+static recno_t Value ;
+static recno_t zero = 0 ;
+static DB_File CurrentDB ;
+static DBTKEY empty ;
+
+#ifdef DB_VERSION_MAJOR
+
+static int
+#ifdef CAN_PROTOTYPE
+db_put(DB_File db, DBTKEY key, DBT value, u_int flags)
+#else
+db_put(db, key, value, flags)
+DB_File db ;
+DBTKEY key ;
+DBT value ;
+u_int flags ;
+#endif
+{
+ int status ;
+
+ if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) {
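+	/* Emulate the 1.x R_IAFTER/R_IBEFORE semantics: position a temporary
+	   cursor on the existing record for this key, then insert the new
+	   record relative to it with DB_AFTER/DB_BEFORE. */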
+ DBC * temp_cursor ;
+ DBT l_key, l_value;
+
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor) != 0)
+#else
+ if (((db->dbp)->cursor)(db->dbp, NULL, &temp_cursor, 0) != 0)
+#endif
+ return (-1) ;
+
+ memset(&l_key, 0, sizeof(l_key));
+ l_key.data = key.data;
+ l_key.size = key.size;
+ memset(&l_value, 0, sizeof(l_value));
+ l_value.data = value.data;
+ l_value.size = value.size;
+
+ if ( temp_cursor->c_get(temp_cursor, &l_key, &l_value, DB_SET) != 0) {
+ (void)temp_cursor->c_close(temp_cursor);
+ return (-1);
+ }
+
+ status = temp_cursor->c_put(temp_cursor, &key, &value, flags);
+ (void)temp_cursor->c_close(temp_cursor);
+
+ return (status) ;
+ }
+
+
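+    /* R_CURSOR: overwrite the record at the current cursor position */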
+ if (flagSet(flags, R_CURSOR)) {
+ return ((db->cursor)->c_put)(db->cursor, &key, &value, DB_CURRENT);
+ }
+
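+    /* R_SETCURSOR: store the key/value pair, then leave the cursor
+       positioned on the newly stored key */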
+ if (flagSet(flags, R_SETCURSOR)) {
+ if ((db->dbp)->put(db->dbp, NULL, &key, &value, 0) != 0)
+ return -1 ;
+ return ((db->cursor)->c_get)(db->cursor, &key, &value, DB_SET_RANGE);
+
+ }
+
+ return ((db->dbp)->put)(db->dbp, NULL, &key, &value, flags) ;
+
+}
+
+#endif /* DB_VERSION_MAJOR */
+
+
+static int
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_compare(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_compare(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif /* CAN_PROTOTYPE */
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_compare(const DBT *key1, const DBT *key2)
+#else
+btree_compare(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->compare, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File btree_compare: expected 1 return value from compare sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+ return (retval) ;
+
+}
+
+static DB_Prefix_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(DB * db, const DBT *key1, const DBT *key2)
+#else
+btree_prefix(db, key1, key2)
+DB * db ;
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+btree_prefix(const DBT *key1, const DBT *key2)
+#else
+btree_prefix(key1, key2)
+const DBT * key1 ;
+const DBT * key2 ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ char * data1, * data2 ;
+ int retval ;
+ int count ;
+
+ data1 = (char *) key1->data ;
+ data2 = (char *) key2->data ;
+
+#ifndef newSVpvn
+ /* As newSVpv will assume that the data pointer is a null terminated C
+ string if the size parameter is 0, make sure that data points to an
+ empty string if the length is 0
+ */
+ if (key1->size == 0)
+ data1 = "" ;
+ if (key2->size == 0)
+ data2 = "" ;
+#endif
+
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+ EXTEND(SP,2) ;
+ PUSHs(sv_2mortal(newSVpvn(data1,key1->size)));
+ PUSHs(sv_2mortal(newSVpvn(data2,key2->size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->prefix, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File btree_prefix: expected 1 return value from prefix sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#ifdef BERKELEY_DB_1
+# define HASH_CB_SIZE_TYPE size_t
+#else
+# define HASH_CB_SIZE_TYPE u_int32_t
+#endif
+
+static DB_Hash_t
+#ifdef AT_LEAST_DB_3_2
+
+#ifdef CAN_PROTOTYPE
+hash_cb(DB * db, const void *data, u_int32_t size)
+#else
+hash_cb(db, data, size)
+DB * db ;
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#else /* Berkeley DB < 3.2 */
+
+#ifdef CAN_PROTOTYPE
+hash_cb(const void *data, HASH_CB_SIZE_TYPE size)
+#else
+hash_cb(data, size)
+const void * data ;
+HASH_CB_SIZE_TYPE size ;
+#endif
+
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ dSP ;
+ int retval ;
+ int count ;
+
+#ifndef newSVpvn
+ if (size == 0)
+ data = "" ;
+#endif
+
+ /* DGH - Next two lines added to fix corrupted stack problem */
+ ENTER ;
+ SAVETMPS;
+
+ PUSHMARK(SP) ;
+
+ XPUSHs(sv_2mortal(newSVpvn((char*)data,size)));
+ PUTBACK ;
+
+ count = perl_call_sv(CurrentDB->hash, G_SCALAR);
+
+ SPAGAIN ;
+
+ if (count != 1)
+ croak ("DB_File hash_cb: expected 1 return value from hash sub, got %d\n", count) ;
+
+ retval = POPi ;
+
+ PUTBACK ;
+ FREETMPS ;
+ LEAVE ;
+
+ return (retval) ;
+}
+
+
+#if defined(TRACE) && defined(BERKELEY_DB_1_OR_2)
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintHash(INFO *hash)
+#else
+PrintHash(hash)
+INFO * hash ;
+#endif
+{
+ printf ("HASH Info\n") ;
+ printf (" hash = %s\n",
+ (hash->db_HA_hash != NULL ? "redefined" : "default")) ;
+ printf (" bsize = %d\n", hash->db_HA_bsize) ;
+ printf (" ffactor = %d\n", hash->db_HA_ffactor) ;
+ printf (" nelem = %d\n", hash->db_HA_nelem) ;
+ printf (" cachesize = %d\n", hash->db_HA_cachesize) ;
+ printf (" lorder = %d\n", hash->db_HA_lorder) ;
+
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintRecno(INFO *recno)
+#else
+PrintRecno(recno)
+INFO * recno ;
+#endif
+{
+ printf ("RECNO Info\n") ;
+ printf (" flags = %d\n", recno->db_RE_flags) ;
+ printf (" cachesize = %d\n", recno->db_RE_cachesize) ;
+ printf (" psize = %d\n", recno->db_RE_psize) ;
+ printf (" lorder = %d\n", recno->db_RE_lorder) ;
+    printf (" reclen = %lu\n", (unsigned long)recno->db_RE_reclen) ;
+ printf (" bval = %d 0x%x\n", recno->db_RE_bval, recno->db_RE_bval) ;
+ printf (" bfname = %d [%s]\n", recno->db_RE_bfname, recno->db_RE_bfname) ;
+}
+
+static void
+#ifdef CAN_PROTOTYPE
+PrintBtree(INFO *btree)
+#else
+PrintBtree(btree)
+INFO * btree ;
+#endif
+{
+ printf ("BTREE Info\n") ;
+ printf (" compare = %s\n",
+ (btree->db_BT_compare ? "redefined" : "default")) ;
+ printf (" prefix = %s\n",
+ (btree->db_BT_prefix ? "redefined" : "default")) ;
+ printf (" flags = %d\n", btree->db_BT_flags) ;
+ printf (" cachesize = %d\n", btree->db_BT_cachesize) ;
+ printf (" psize = %d\n", btree->db_BT_psize) ;
+#ifndef DB_VERSION_MAJOR
+ printf (" maxkeypage = %d\n", btree->db_BT_maxkeypage) ;
+ printf (" minkeypage = %d\n", btree->db_BT_minkeypage) ;
+#endif
+ printf (" lorder = %d\n", btree->db_BT_lorder) ;
+}
+
+#else
+
+#define PrintRecno(recno)
+#define PrintHash(hash)
+#define PrintBtree(btree)
+
+#endif /* TRACE */
+
+
+static I32
+#ifdef CAN_PROTOTYPE
+GetArrayLength(pTHX_ DB_File db)
+#else
+GetArrayLength(db)
+DB_File db ;
+#endif
+{
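+    /* The length of a RECNO array is the record number (key) of its last record */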
+ DBT key ;
+ DBT value ;
+ int RETVAL ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ if (RETVAL == 0)
+ RETVAL = *(I32 *)key.data ;
+ else /* No key means empty file */
+ RETVAL = 0 ;
+
+ return ((I32)RETVAL) ;
+}
+
+static recno_t
+#ifdef CAN_PROTOTYPE
+GetRecnoKey(pTHX_ DB_File db, I32 value)
+#else
+GetRecnoKey(db, value)
+DB_File db ;
+I32 value ;
+#endif
+{
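+    /* Map a Perl array subscript (0-based, possibly negative) onto a 1-based recno key */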
+ if (value < 0) {
+ /* Get the length of the array */
+ I32 length = GetArrayLength(aTHX_ db) ;
+
+ /* check for attempt to write before start of array */
+ if (length + value + 1 <= 0)
+ croak("Modification of non-creatable array value attempted, subscript %ld", (long)value) ;
+
+ value = length + value + 1 ;
+ }
+ else
+ ++ value ;
+
+ return value ;
+}
+
+
+static DB_File
+#ifdef CAN_PROTOTYPE
+ParseOpenInfo(pTHX_ int isHASH, char *name, int flags, int mode, SV *sv)
+#else
+ParseOpenInfo(isHASH, name, flags, mode, sv)
+int isHASH ;
+char * name ;
+int flags ;
+int mode ;
+SV * sv ;
+#endif
+{
+
+#ifdef BERKELEY_DB_1_OR_2 /* Berkeley DB Version 1 or 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ void * openinfo = NULL ;
+ INFO * info = &RETVAL->info ;
+ STRLEN n_a;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+#endif /* DBM_FILTERING */
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ info->db_HA_hash = hash_cb ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+ else
+ info->db_HA_hash = NULL ;
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ info->db_HA_ffactor = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ info->db_HA_nelem = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ info->db_HA_bsize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_HA_cachesize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_HA_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+ openinfo = (void*)info ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_compare = btree_compare ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_compare = NULL ;
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ info->db_BT_prefix = btree_prefix ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+ else
+ info->db_BT_prefix = NULL ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_BT_flags = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_BT_cachesize = svp ? SvIV(*svp) : 0;
+
+#ifndef DB_VERSION_MAJOR
+ svp = hv_fetch(action, "minkeypage", 10, FALSE);
+ info->btree.minkeypage = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "maxkeypage", 10, FALSE);
+ info->btree.maxkeypage = svp ? SvIV(*svp) : 0;
+#endif
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_BT_psize = svp ? SvIV(*svp) : 0;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_BT_lorder = svp ? SvIV(*svp) : 0;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+ openinfo = (void *)info ;
+
+ info->db_RE_flags = 0 ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ info->db_RE_flags = (u_long) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ info->db_RE_reclen = (size_t) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ info->db_RE_cachesize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ info->db_RE_psize = (u_int) (svp ? SvIV(*svp) : 0);
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ info->db_RE_lorder = (int) (svp ? SvIV(*svp) : 0);
+
+#ifdef DB_VERSION_MAJOR
+ info->re_source = name ;
+ name = NULL ;
+#endif
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+#ifdef DB_VERSION_MAJOR
+ name = (char*) n_a ? ptr : NULL ;
+#else
+ info->db_RE_bfname = (char*) (n_a ? ptr : NULL) ;
+#endif
+ }
+ else
+#ifdef DB_VERSION_MAJOR
+ name = NULL ;
+#else
+ info->db_RE_bfname = NULL ;
+#endif
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+#ifdef DB_VERSION_MAJOR
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (info->flags & DB_FIXEDLEN) {
+ info->re_pad = value ;
+ info->flags |= DB_PAD ;
+ }
+ else {
+ info->re_delim = value ;
+ info->flags |= DB_DELIMITER ;
+ }
+
+ }
+#else
+ if (svp && SvOK(*svp))
+ {
+ if (SvPOK(*svp))
+ info->db_RE_bval = (u_char)*SvPV(*svp, n_a) ;
+ else
+ info->db_RE_bval = (u_char)(unsigned long) SvIV(*svp) ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+
+ }
+ else
+ {
+ if (info->db_RE_flags & R_FIXEDLEN)
+ info->db_RE_bval = (u_char) ' ' ;
+ else
+ info->db_RE_bval = (u_char) '\n' ;
+ DB_flags(info->flags, DB_DELIMITER) ;
+ }
+#endif
+
+#ifdef DB_RENUMBER
+ info->flags |= DB_RENUMBER ;
+#endif
+
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+
+ /* OS2 Specific Code */
+#ifdef OS2
+#ifdef __EMX__
+ flags |= O_BINARY;
+#endif /* __EMX__ */
+#endif /* OS2 */
+
+#ifdef DB_VERSION_MAJOR
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 2.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = db_open(name, RETVAL->type, Flags, mode, NULL, openinfo, &RETVAL->dbp) ;
+ if (status == 0)
+#if DB_VERSION_MAJOR == 2 && DB_VERSION_MINOR < 6
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor) ;
+#else
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+#endif
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+#else
+
+#if defined(DB_LIBRARY_COMPATIBILITY_API) && DB_VERSION_MAJOR > 2
+ RETVAL->dbp = __db185_open(name, flags, mode, RETVAL->type, openinfo) ;
+#else
+ RETVAL->dbp = dbopen(name, flags, mode, RETVAL->type, openinfo) ;
+#endif /* DB_LIBRARY_COMPATIBILITY_API */
+
+#endif
+
+ return (RETVAL) ;
+
+#else /* Berkeley DB Version > 2 */
+
+ SV ** svp;
+ HV * action ;
+ DB_File RETVAL = (DB_File)safemalloc(sizeof(DB_File_type)) ;
+ DB * dbp ;
+ STRLEN n_a;
+ int status ;
+
+/* printf("In ParseOpenInfo name=[%s] flags=[%d] mode = [%d]\n", name, flags, mode) ; */
+ Zero(RETVAL, 1, DB_File_type) ;
+
+ /* Default to HASH */
+#ifdef DBM_FILTERING
+ RETVAL->filtering = 0 ;
+ RETVAL->filter_fetch_key = RETVAL->filter_store_key =
+ RETVAL->filter_fetch_value = RETVAL->filter_store_value =
+#endif /* DBM_FILTERING */
+ RETVAL->hash = RETVAL->compare = RETVAL->prefix = NULL ;
+ RETVAL->type = DB_HASH ;
+
+ /* DGH - Next line added to avoid SEGV on existing hash DB */
+ CurrentDB = RETVAL;
+
+ /* fd for 1.86 hash in memory files doesn't return -1 like 1.85 */
+ RETVAL->in_memory = (name == NULL) ;
+
+ status = db_create(&RETVAL->dbp, NULL,0) ;
+ /* printf("db_create returned %d %s\n", status, db_strerror(status)) ; */
+ if (status) {
+ RETVAL->dbp = NULL ;
+ return (RETVAL) ;
+ }
+ dbp = RETVAL->dbp ;
+
+ if (sv)
+ {
+ if (! SvROK(sv) )
+ croak ("type parameter is not a reference") ;
+
+ svp = hv_fetch( (HV*)SvRV(sv), "GOT", 3, FALSE) ;
+ if (svp && SvOK(*svp))
+ action = (HV*) SvRV(*svp) ;
+ else
+ croak("internal error") ;
+
+ if (sv_isa(sv, "DB_File::HASHINFO"))
+ {
+
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_HASH database") ;
+
+ RETVAL->type = DB_HASH ;
+
+ svp = hv_fetch(action, "hash", 4, FALSE);
+
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_h_hash(dbp, hash_cb) ;
+ RETVAL->hash = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "ffactor", 7, FALSE);
+ if (svp)
+ (void)dbp->set_h_ffactor(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "nelem", 5, FALSE);
+ if (svp)
+ (void)dbp->set_h_nelem(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "bsize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, SvIV(*svp));
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+
+ PrintHash(info) ;
+ }
+ else if (sv_isa(sv, "DB_File::BTREEINFO"))
+ {
+ if (!isHASH)
+ croak("DB_File can only tie an associative array to a DB_BTREE database");
+
+ RETVAL->type = DB_BTREE ;
+
+ svp = hv_fetch(action, "compare", 7, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_compare(dbp, btree_compare) ;
+ RETVAL->compare = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "prefix", 6, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ (void)dbp->set_bt_prefix(dbp, btree_prefix) ;
+ RETVAL->prefix = newSVsv(*svp) ;
+ }
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp)
+ (void)dbp->set_flags(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp)
+ (void)dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp)
+ (void)dbp->set_pagesize(dbp, SvIV(*svp)) ;
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp)
+ (void)dbp->set_lorder(dbp, SvIV(*svp)) ;
+
+ PrintBtree(info) ;
+
+ }
+ else if (sv_isa(sv, "DB_File::RECNOINFO"))
+ {
+ int fixed = FALSE ;
+
+ if (isHASH)
+ croak("DB_File can only tie an array to a DB_RECNO database");
+
+ RETVAL->type = DB_RECNO ;
+
+ svp = hv_fetch(action, "flags", 5, FALSE);
+ if (svp) {
+ int flags = SvIV(*svp) ;
+	        /* remove FIXEDLEN, if present */
+ if (flags & DB_FIXEDLEN) {
+ fixed = TRUE ;
+ flags &= ~DB_FIXEDLEN ;
+ }
+ }
+
+ svp = hv_fetch(action, "cachesize", 9, FALSE);
+ if (svp) {
+ status = dbp->set_cachesize(dbp, 0, SvIV(*svp), 0) ;
+ }
+
+ svp = hv_fetch(action, "psize", 5, FALSE);
+ if (svp) {
+ status = dbp->set_pagesize(dbp, SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "lorder", 6, FALSE);
+ if (svp) {
+ status = dbp->set_lorder(dbp, SvIV(*svp)) ;
+ }
+
+ svp = hv_fetch(action, "bval", 4, FALSE);
+ if (svp && SvOK(*svp))
+ {
+ int value ;
+ if (SvPOK(*svp))
+ value = (int)*SvPV(*svp, n_a) ;
+ else
+ value = SvIV(*svp) ;
+
+ if (fixed) {
+ status = dbp->set_re_pad(dbp, value) ;
+ }
+ else {
+ status = dbp->set_re_delim(dbp, value) ;
+ }
+
+ }
+
+ if (fixed) {
+ svp = hv_fetch(action, "reclen", 6, FALSE);
+ if (svp) {
+ u_int32_t len = (u_int32_t)SvIV(*svp) ;
+ status = dbp->set_re_len(dbp, len) ;
+ }
+ }
+
+ if (name != NULL) {
+ status = dbp->set_re_source(dbp, name) ;
+ name = NULL ;
+ }
+
+ svp = hv_fetch(action, "bfname", 6, FALSE);
+ if (svp && SvOK(*svp)) {
+ char * ptr = SvPV(*svp,n_a) ;
+ name = (char*) n_a ? ptr : NULL ;
+ }
+ else
+ name = NULL ;
+
+
+ status = dbp->set_flags(dbp, DB_RENUMBER) ;
+
+ if (flags){
+ (void)dbp->set_flags(dbp, flags) ;
+ }
+ PrintRecno(info) ;
+ }
+ else
+ croak("type is not of type DB_File::HASHINFO, DB_File::BTREEINFO or DB_File::RECNOINFO");
+ }
+
+ {
+ int Flags = 0 ;
+ int status ;
+
+ /* Map 1.x flags to 3.x flags */
+ if ((flags & O_CREAT) == O_CREAT)
+ Flags |= DB_CREATE ;
+
+#if O_RDONLY == 0
+ if (flags == O_RDONLY)
+#else
+ if ((flags & O_RDONLY) == O_RDONLY && (flags & O_RDWR) != O_RDWR)
+#endif
+ Flags |= DB_RDONLY ;
+
+#ifdef O_TRUNC
+ if ((flags & O_TRUNC) == O_TRUNC)
+ Flags |= DB_TRUNCATE ;
+#endif
+
+ status = (RETVAL->dbp->open)(RETVAL->dbp, name, NULL, RETVAL->type,
+ Flags, mode) ;
+ /* printf("open returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status == 0)
+ status = (RETVAL->dbp->cursor)(RETVAL->dbp, NULL, &RETVAL->cursor,
+ 0) ;
+ /* printf("cursor returned %d %s\n", status, db_strerror(status)) ; */
+
+ if (status)
+ RETVAL->dbp = NULL ;
+
+ }
+
+ return (RETVAL) ;
+
+#endif /* Berkeley DB Version > 2 */
+
+} /* ParseOpenInfo */
+
+
+static double
+#ifdef CAN_PROTOTYPE
+constant(char *name, int arg)
+#else
+constant(name, arg)
+char *name;
+int arg;
+#endif
+{
+ errno = 0;
+ switch (*name) {
+ case 'A':
+ break;
+ case 'B':
+ if (strEQ(name, "BTREEMAGIC"))
+#ifdef BTREEMAGIC
+ return BTREEMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "BTREEVERSION"))
+#ifdef BTREEVERSION
+ return BTREEVERSION;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'C':
+ break;
+ case 'D':
+ if (strEQ(name, "DB_LOCK"))
+#ifdef DB_LOCK
+ return DB_LOCK;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_SHMEM"))
+#ifdef DB_SHMEM
+ return DB_SHMEM;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "DB_TXN"))
+#ifdef DB_TXN
+ return (U32)DB_TXN;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'E':
+ break;
+ case 'F':
+ break;
+ case 'G':
+ break;
+ case 'H':
+ if (strEQ(name, "HASHMAGIC"))
+#ifdef HASHMAGIC
+ return HASHMAGIC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "HASHVERSION"))
+#ifdef HASHVERSION
+ return HASHVERSION;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'I':
+ break;
+ case 'J':
+ break;
+ case 'K':
+ break;
+ case 'L':
+ break;
+ case 'M':
+ if (strEQ(name, "MAX_PAGE_NUMBER"))
+#ifdef MAX_PAGE_NUMBER
+ return (U32)MAX_PAGE_NUMBER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "MAX_PAGE_OFFSET"))
+#ifdef MAX_PAGE_OFFSET
+ return MAX_PAGE_OFFSET;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "MAX_REC_NUMBER"))
+#ifdef MAX_REC_NUMBER
+ return (U32)MAX_REC_NUMBER;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'N':
+ break;
+ case 'O':
+ break;
+ case 'P':
+ break;
+ case 'Q':
+ break;
+ case 'R':
+ if (strEQ(name, "RET_ERROR"))
+#ifdef RET_ERROR
+ return RET_ERROR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "RET_SPECIAL"))
+#ifdef RET_SPECIAL
+ return RET_SPECIAL;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "RET_SUCCESS"))
+#ifdef RET_SUCCESS
+ return RET_SUCCESS;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_CURSOR"))
+#ifdef R_CURSOR
+ return R_CURSOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_DUP"))
+#ifdef R_DUP
+ return R_DUP;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_FIRST"))
+#ifdef R_FIRST
+ return R_FIRST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_FIXEDLEN"))
+#ifdef R_FIXEDLEN
+ return R_FIXEDLEN;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_IAFTER"))
+#ifdef R_IAFTER
+ return R_IAFTER;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_IBEFORE"))
+#ifdef R_IBEFORE
+ return R_IBEFORE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_LAST"))
+#ifdef R_LAST
+ return R_LAST;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NEXT"))
+#ifdef R_NEXT
+ return R_NEXT;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NOKEY"))
+#ifdef R_NOKEY
+ return R_NOKEY;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_NOOVERWRITE"))
+#ifdef R_NOOVERWRITE
+ return R_NOOVERWRITE;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_PREV"))
+#ifdef R_PREV
+ return R_PREV;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_RECNOSYNC"))
+#ifdef R_RECNOSYNC
+ return R_RECNOSYNC;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_SETCURSOR"))
+#ifdef R_SETCURSOR
+ return R_SETCURSOR;
+#else
+ goto not_there;
+#endif
+ if (strEQ(name, "R_SNAPSHOT"))
+#ifdef R_SNAPSHOT
+ return R_SNAPSHOT;
+#else
+ goto not_there;
+#endif
+ break;
+ case 'S':
+ break;
+ case 'T':
+ break;
+ case 'U':
+ break;
+ case 'V':
+ break;
+ case 'W':
+ break;
+ case 'X':
+ break;
+ case 'Y':
+ break;
+ case 'Z':
+ break;
+ case '_':
+ break;
+ }
+ errno = EINVAL;
+ return 0;
+
+not_there:
+ errno = ENOENT;
+ return 0;
+}
+
+MODULE = DB_File PACKAGE = DB_File PREFIX = db_
+
+BOOT:
+ {
+ __getBerkeleyDBInfo() ;
+
+ DBT_clear(empty) ;
+ empty.data = &zero ;
+ empty.size = sizeof(recno_t) ;
+ }
+
+double
+constant(name,arg)
+ char * name
+ int arg
+
+
+DB_File
+db_DoTie_(isHASH, dbtype, name=undef, flags=O_CREAT|O_RDWR, mode=0666, type=DB_HASH)
+ int isHASH
+ char * dbtype
+ int flags
+ int mode
+ CODE:
+ {
+ char * name = (char *) NULL ;
+ SV * sv = (SV *) NULL ;
+ STRLEN n_a;
+
+ if (items >= 3 && SvOK(ST(2)))
+ name = (char*) SvPV(ST(2), n_a) ;
+
+ if (items == 6)
+ sv = ST(5) ;
+
+ RETVAL = ParseOpenInfo(aTHX_ isHASH, name, flags, mode, sv) ;
+ if (RETVAL->dbp == NULL)
+ RETVAL = NULL ;
+ }
+ OUTPUT:
+ RETVAL
+
+int
+db_DESTROY(db)
+ DB_File db
+ INIT:
+ CurrentDB = db ;
+ CLEANUP:
+ if (db->hash)
+ SvREFCNT_dec(db->hash) ;
+ if (db->compare)
+ SvREFCNT_dec(db->compare) ;
+ if (db->prefix)
+ SvREFCNT_dec(db->prefix) ;
+#ifdef DBM_FILTERING
+ if (db->filter_fetch_key)
+ SvREFCNT_dec(db->filter_fetch_key) ;
+ if (db->filter_store_key)
+ SvREFCNT_dec(db->filter_store_key) ;
+ if (db->filter_fetch_value)
+ SvREFCNT_dec(db->filter_fetch_value) ;
+ if (db->filter_store_value)
+ SvREFCNT_dec(db->filter_store_value) ;
+#endif /* DBM_FILTERING */
+ safefree(db) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+
+
+int
+db_DELETE(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ INIT:
+ CurrentDB = db ;
+
+
+int
+db_EXISTS(db, key)
+ DB_File db
+ DBTKEY key
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = (((db->dbp)->get)(db->dbp, TXN &key, &value, 0) == 0) ;
+ }
+ OUTPUT:
+ RETVAL
+
+void
+db_FETCH(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ PREINIT:
+ int RETVAL;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* RETVAL = ((db->dbp)->get)(db->dbp, TXN &key, &value, flags) ; */
+ RETVAL = db_get(db, key, value, flags) ;
+ ST(0) = sv_newmortal();
+ OutputValue(ST(0), value)
+ }
+
+int
+db_STORE(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ INIT:
+ CurrentDB = db ;
+
+
+void
+db_FIRSTKEY(db)
+ DB_File db
+ PREINIT:
+ int RETVAL;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+void
+db_NEXTKEY(db, key)
+ DB_File db
+ DBTKEY key
+ PREINIT:
+ int RETVAL;
+ CODE:
+ {
+ DBT value ;
+
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ RETVAL = do_SEQ(db, key, value, R_NEXT) ;
+ ST(0) = sv_newmortal();
+ OutputKey(ST(0), key) ;
+ }
+
+#
+# These would be nice for RECNO
+#
+
+int
+unshift(db, ...)
+ DB_File db
+ ALIAS: UNSHIFT = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ int i ;
+ int One ;
+ STRLEN n_a;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, DB_FIRST) ;
+ RETVAL = 0 ;
+#else
+ RETVAL = -1 ;
+#endif
+ for (i = items-1 ; i > 0 ; --i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ One = 1 ;
+ key.data = &One ;
+ key.size = sizeof(int) ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = (db->cursor->c_put)(db->cursor, &key, &value, DB_BEFORE) ;
+#else
+ RETVAL = (db->dbp->put)(db->dbp, &key, &value, R_IBEFORE) ;
+#endif
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+void
+pop(db)
+ DB_File db
+ ALIAS: POP = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+
+ /* First get the final value */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv(ST(0), &PL_sv_undef);
+ }
+ }
+
+void
+shift(db)
+ DB_File db
+ ALIAS: SHIFT = 1
+ PREINIT:
+ I32 RETVAL;
+ CODE:
+ {
+ DBT value ;
+ DBTKEY key ;
+
+ DBT_clear(key) ;
+ DBT_clear(value) ;
+ CurrentDB = db ;
+ /* get the first value */
+ RETVAL = do_SEQ(db, key, value, R_FIRST) ;
+ ST(0) = sv_newmortal();
+ /* Now delete it */
+ if (RETVAL == 0)
+ {
+ /* the call to del will trash value, so take a copy now */
+ OutputValue(ST(0), value) ;
+ RETVAL = db_del(db, key, R_CURSOR) ;
+ if (RETVAL != 0)
+ sv_setsv (ST(0), &PL_sv_undef) ;
+ }
+ }
+
+
+I32
+push(db, ...)
+ DB_File db
+ ALIAS: PUSH = 1
+ CODE:
+ {
+ DBTKEY key ;
+ DBT value ;
+ DB * Db = db->dbp ;
+ int i ;
+ STRLEN n_a;
+ int keyval ;
+
+ DBT_flags(key) ;
+ DBT_flags(value) ;
+ CurrentDB = db ;
+ /* Set the Cursor to the Last element */
+ RETVAL = do_SEQ(db, key, value, R_LAST) ;
+#ifndef DB_VERSION_MAJOR
+ if (RETVAL >= 0)
+#endif
+ {
+ if (RETVAL == 0)
+ keyval = *(int*)key.data ;
+ else
+ keyval = 0 ;
+ for (i = 1 ; i < items ; ++i)
+ {
+ value.data = SvPV(ST(i), n_a) ;
+ value.size = n_a ;
+ ++ keyval ;
+ key.data = &keyval ;
+ key.size = sizeof(int) ;
+ RETVAL = (Db->put)(Db, TXN &key, &value, 0) ;
+ if (RETVAL != 0)
+ break;
+ }
+ }
+ }
+ OUTPUT:
+ RETVAL
+
+I32
+length(db)
+ DB_File db
+ ALIAS: FETCHSIZE = 1
+ CODE:
+ CurrentDB = db ;
+ RETVAL = GetArrayLength(aTHX_ db) ;
+ OUTPUT:
+ RETVAL
+
+
+#
+# Now provide an interface to the rest of the DB functionality
+#
+
+int
+db_del(db, key, flags=0)
+ DB_File db
+ DBTKEY key
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_del(db, key, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_get(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_get(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ value
+
+int
+db_put(db, key, value, flags=0)
+ DB_File db
+ DBTKEY key
+ DBT value
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_put(db, key, value, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_KEYEXIST)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key if (flagSet(flags, R_IAFTER) || flagSet(flags, R_IBEFORE)) OutputKey(ST(1), key);
+
+int
+db_fd(db)
+ DB_File db
+ CODE:
+ CurrentDB = db ;
+#ifdef DB_VERSION_MAJOR
+ RETVAL = -1 ;
+ {
+ int status = 0 ;
+ status = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp, &RETVAL) ) ;
+ if (status != 0)
+ RETVAL = -1 ;
+ }
+#else
+ RETVAL = (db->in_memory
+ ? -1
+ : ((db->dbp)->fd)(db->dbp) ) ;
+#endif
+ OUTPUT:
+ RETVAL
+
+int
+db_sync(db, flags=0)
+ DB_File db
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ RETVAL = db_sync(db, flags) ;
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+#endif
+ OUTPUT:
+ RETVAL
+
+
+int
+db_seq(db, key, value, flags)
+ DB_File db
+ DBTKEY key
+ DBT value = NO_INIT
+ u_int flags
+ CODE:
+ CurrentDB = db ;
+ DBT_clear(value) ;
+ RETVAL = db_seq(db, key, value, flags);
+#ifdef DB_VERSION_MAJOR
+ if (RETVAL > 0)
+ RETVAL = -1 ;
+ else if (RETVAL == DB_NOTFOUND)
+ RETVAL = 1 ;
+#endif
+ OUTPUT:
+ RETVAL
+ key
+ value
+
+#ifdef DBM_FILTERING
+
+#define setFilter(type) \
+ { \
+ if (db->type) \
+ RETVAL = sv_mortalcopy(db->type) ; \
+ ST(0) = RETVAL ; \
+ if (db->type && (code == &PL_sv_undef)) { \
+ SvREFCNT_dec(db->type) ; \
+ db->type = NULL ; \
+ } \
+ else if (code) { \
+ if (db->type) \
+ sv_setsv(db->type, code) ; \
+ else \
+ db->type = newSVsv(code) ; \
+ } \
+ }
+
+
+SV *
+filter_fetch_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_key) ;
+
+SV *
+filter_store_key(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_key) ;
+
+SV *
+filter_fetch_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_fetch_value) ;
+
+SV *
+filter_store_value(db, code)
+ DB_File db
+ SV * code
+ SV * RETVAL = &PL_sv_undef ;
+ CODE:
+ setFilter(filter_store_value) ;
+
+#endif /* DBM_FILTERING */
diff --git a/db/perl/DB_File/DB_File_BS b/db/perl/DB_File/DB_File_BS
new file mode 100644
index 000000000..9282c4988
--- /dev/null
+++ b/db/perl/DB_File/DB_File_BS
@@ -0,0 +1,6 @@
+# NeXT needs /usr/lib/libposix.a to load along with DB_File.so
+if ( $dlsrc eq "dl_next.xs" ) {
+ @DynaLoader::dl_resolve_using = ( '/usr/lib/libposix.a' );
+}
+
+1;
diff --git a/db/perl/DB_File/MANIFEST b/db/perl/DB_File/MANIFEST
new file mode 100644
index 000000000..0cc30dbfb
--- /dev/null
+++ b/db/perl/DB_File/MANIFEST
@@ -0,0 +1,27 @@
+Makefile.PL
+DB_File.pm
+DB_File.xs
+DB_File_BS
+Changes
+config.in
+dbinfo
+hints/dynixptx.pl
+hints/sco.pl
+MANIFEST
+README
+typemap
+t/db-btree.t
+t/db-hash.t
+t/db-recno.t
+version.c
+patches/5.004
+patches/5.004_01
+patches/5.004_02
+patches/5.004_03
+patches/5.004_04
+patches/5.004_05
+patches/5.005
+patches/5.005_01
+patches/5.005_02
+patches/5.005_03
+patches/5.6.0
diff --git a/db/perl/DB_File/Makefile.PL b/db/perl/DB_File/Makefile.PL
new file mode 100644
index 000000000..3a0b8196d
--- /dev/null
+++ b/db/perl/DB_File/Makefile.PL
@@ -0,0 +1,196 @@
+#! perl -w
+use strict ;
+use ExtUtils::MakeMaker 5.16 ;
+use Config ;
+
+my $VER_INFO ;
+my $LIB_DIR ;
+my $INC_DIR ;
+my $DB_NAME ;
+my $LIBS ;
+my $COMPAT185 = "" ;
+
+my @files = ('DB_File.pm', glob "t/*.t") ;
+# warnings pragma is stable from 5.6.1 onward
+if ($] < 5.006001)
+ { oldWarnings(@files) }
+else
+ { newWarnings(@files) }
+
+ParseCONFIG() ;
+
+if (defined $DB_NAME)
+ { $LIBS = $DB_NAME }
+else {
+ if ($^O eq 'MSWin32')
+ { $LIBS = '-llibdb' }
+ else
+ { $LIBS = '-ldb' }
+}
+
+# Solaris is special.
+#$LIBS .= " -lthread" if $^O eq 'solaris' ;
+
+# AIX is special.
+$LIBS .= " -lpthread" if $^O eq 'aix' ;
+
+# OS2 is a special case, so check for it now.
+my $OS2 = "" ;
+$OS2 = "-DOS2" if $Config{'osname'} eq 'os2' ;
+
+WriteMakefile(
+ NAME => 'DB_File',
+ LIBS => ["-L${LIB_DIR} $LIBS"],
+ MAN3PODS => ' ', # Pods will be built by installman.
+ INC => "-I$INC_DIR",
+ VERSION_FROM => 'DB_File.pm',
+ XSPROTOARG => '-noprototypes',
+ DEFINE => "$OS2 $VER_INFO $COMPAT185",
+ OBJECT => 'version$(OBJ_EXT) DB_File$(OBJ_EXT)',
+ OPTIMIZE => '-g',
+ 'macro' => { INSTALLDIRS => 'perl' },
+ 'dist' => {COMPRESS=>'gzip', SUFFIX=>'gz',
+ DIST_DEFAULT => 'MyDoubleCheck tardist'},
+ );
+
+
+sub MY::postamble {
+ '
+
+MyDoubleCheck:
+ @echo Checking config.in is setup for a release
+ @(grep "^LIB.*/usr/local/BerkeleyDB" config.in && \
+ grep "^INCLUDE.*/usr/local/BerkeleyDB" config.in && \
+ grep "^#DBNAME.*" config.in) >/dev/null || \
+ (echo config.in needs fixing ; exit 1)
+ @echo config.in is ok
+
+version$(OBJ_EXT): version.c
+
+$(NAME).xs: typemap
+ @$(TOUCH) $(NAME).xs
+
+Makefile: config.in
+
+' ;
+}
+
+
+sub ParseCONFIG
+{
+ my ($k, $v) ;
+ my @badkey = () ;
+ my %Info = () ;
+ my @Options = qw( INCLUDE LIB PREFIX HASH DBNAME COMPAT185 ) ;
+ my %ValidOption = map {$_, 1} @Options ;
+ my %Parsed = %ValidOption ;
+ my $CONFIG = 'config.in' ;
+
+ print "Parsing $CONFIG...\n" ;
+
+ # DBNAME & COMPAT185 are optional, so pretend they have
+ # been parsed.
+ delete $Parsed{'DBNAME'} ;
+ delete $Parsed{'COMPAT185'} ;
+ $Info{COMPAT185} = "No" ;
+
+
+ open(F, "$CONFIG") or die "Cannot open file $CONFIG: $!\n" ;
+ while (<F>) {
+ s/^\s*|\s*$//g ;
+ next if /^\s*$/ or /^\s*#/ ;
+ s/\s*#\s*$// ;
+
+ ($k, $v) = split(/\s+=\s+/, $_, 2) ;
+ $k = uc $k ;
+ if ($ValidOption{$k}) {
+ delete $Parsed{$k} ;
+ $Info{$k} = $v ;
+ }
+ else {
+ push(@badkey, $k) ;
+ }
+ }
+ close F ;
+
+ print "Unknown keys in $CONFIG ignored [@badkey]\n"
+ if @badkey ;
+
+ # check parsed values
+ my @missing = () ;
+ die "The following keys are missing from $CONFIG file: [@missing]\n"
+ if @missing = keys %Parsed ;
+
+ $INC_DIR = $ENV{'DB_FILE_INCLUDE'} || $Info{'INCLUDE'} ;
+ $LIB_DIR = $ENV{'DB_FILE_LIB'} || $Info{'LIB'} ;
+ $DB_NAME = $Info{'DBNAME'} if defined $Info{'DBNAME'} ;
+ $COMPAT185 = "-DCOMPAT185 -DDB_LIBRARY_COMPATIBILITY_API"
+ if (defined $ENV{'DB_FILE_COMPAT185'} &&
+ $ENV{'DB_FILE_COMPAT185'} =~ /^\s*(on|true|1)\s*$/i) ||
+ $Info{'COMPAT185'} =~ /^\s*(on|true|1)\s*$/i ;
+ my $PREFIX = $Info{'PREFIX'} ;
+ my $HASH = $Info{'HASH'} ;
+
+ $VER_INFO = "-DmDB_Prefix_t=${PREFIX} -DmDB_Hash_t=${HASH}" ;
+
+ print <<EOM if 0 ;
+ INCLUDE [$INC_DIR]
+ LIB [$LIB_DIR]
+ HASH [$HASH]
+ PREFIX [$PREFIX]
+ DBNAME [$DB_NAME]
+
+EOM
+
+ print "Looks Good.\n" ;
+
+}
+
+sub oldWarnings
+{
+ local ($^I) = ".bak" ;
+ local (@ARGV) = @_ ;
+
+ while (<>)
+ {
+ if (/^__END__/)
+ {
+ print ;
+ my $this = $ARGV ;
+ while (<>)
+ {
+ last if $ARGV ne $this ;
+ print ;
+ }
+ }
+
+ s/^(\s*)(no\s+warnings)/${1}local (\$^W) = 0; #$2/ ;
+ s/^(\s*)(use\s+warnings)/${1}local (\$^W) = 1; #$2/ ;
+ print ;
+ }
+}
+
+sub newWarnings
+{
+ local ($^I) = ".bak" ;
+ local (@ARGV) = @_ ;
+
+ while (<>)
+ {
+ if (/^__END__/)
+ {
+ my $this = $ARGV ;
+ print ;
+ while (<>)
+ {
+ last if $ARGV ne $this ;
+ print ;
+ }
+ }
+
+ s/^(\s*)local\s*\(\$\^W\)\s*=\s*\d+\s*;\s*#\s*((no|use)\s+warnings.*)/$1$2/ ;
+ print ;
+ }
+}
+
+# end of file Makefile.PL
diff --git a/db/perl/DB_File/README b/db/perl/DB_File/README
new file mode 100644
index 000000000..3e9934550
--- /dev/null
+++ b/db/perl/DB_File/README
@@ -0,0 +1,396 @@
+ DB_File
+
+ Version 1.78
+
+ 30th July 2001
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved. This
+ program is free software; you can redistribute it and/or modify
+ it under the same terms as Perl itself.
+
+
+IMPORTANT NOTICE
+================
+
+If you are using the locking technique described in older versions of
+DB_File, please read the section called "Locking: The Trouble with fd"
+in DB_File.pm immediately. The locking method has been found to be
+unsafe. You risk corrupting your data if you continue to use it.
+
+DESCRIPTION
+-----------
+
+DB_File is a module which allows Perl programs to make use of the
+facilities provided by Berkeley DB version 1. (DB_File can be built
+with version 2 or 3 of Berkeley DB, but it will only support the 1.x
+features.)
+
+If you want to make use of the new features available in Berkeley DB
+2.x or 3.x, use the Perl module BerkeleyDB instead.
+
+Berkeley DB is a C library which provides a consistent interface to a
+number of database formats. DB_File provides an interface to all three
+of the database types (hash, btree and recno) currently supported by
+Berkeley DB.
+
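+As a minimal sketch of the tie interface (the file name below is only an
+example), a Perl hash can be tied to a DB_HASH database like this:
+
+    use DB_File ;
+    use Fcntl ;
+
+    # create the database file if necessary and tie it to %h
+    tie %h, 'DB_File', 'example.db', O_CREAT|O_RDWR, 0666, $DB_HASH
+        or die "Cannot open example.db: $!\n" ;
+
+    $h{'apple'} = 'red' ;          # stored in the database file
+    print $h{'apple'}, "\n" ;      # fetched from the database file
+
+    untie %h ;
+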
+For further details see the documentation included at the end of the
+file DB_File.pm.
+
+PREREQUISITES
+-------------
+
+Before you can build DB_File you must have the following installed on
+your system:
+
+ * Perl 5.004 or greater.
+
+ * Berkeley DB.
+
+ The official web site for Berkeley DB is http://www.sleepycat.com.
+ The latest version of Berkeley DB is always available there. It
+ is recommended that you use the most recent version available at
+ the Sleepycat site.
+
+ The one exception to this advice is where you want to use DB_File
+ to access database files created by a third-party application, like
+ Sendmail or Netscape. In these cases you must build DB_File with a
+ compatible version of Berkeley DB.
+
+ If you want to use Berkeley DB 2.x, you must have version 2.3.4
+ or greater. If you want to use Berkeley DB 3.x, any version will
+ do. For Berkeley DB 1.x, use either version 1.85 or 1.86.
+
+
+BUILDING THE MODULE
+-------------------
+
+Assuming you have met all the prerequisites, building the module should
+be relatively straightforward.
+
+Step 1 : If you are running either Solaris 2.5 or HP-UX 10 and want
+ to use Berkeley DB version 2 or 3, read either the Solaris Notes
+ or HP-UX Notes sections below. If you are running Linux please
+ read the Linux Notes section before proceeding.
+
+Step 2 : Edit the file config.in to suit your local installation.
+ Instructions are given in the file.
+
+Step 3 : Build and test the module using this sequence of commands:
+
+ perl Makefile.PL
+ make
+ make test
+
+
+ NOTE:
+ If you have a very old version of Berkeley DB (i.e. pre 1.85),
+ three of the tests in the recno test harness may fail (tests 51,
+ 53 and 55). You can safely ignore the errors if you're never
+ going to use the broken functionality (recno databases with a
+ modified bval). Otherwise you'll have to upgrade your DB
+ library.
+
+
+INSTALLATION
+------------
+
+ make install
+
+
+TROUBLESHOOTING
+===============
+
+Here are some of the common problems people encounter when building
+DB_File.
+
+Missing db.h or libdb.a
+-----------------------
+
+If you get an error like this:
+
+ cc -c -I/usr/local/include -Dbool=char -DHAS_BOOL
+ -O2 -DVERSION=\"1.64\" -DXS_VERSION=\"1.64\" -fpic
+ -I/usr/local/lib/perl5/i586-linux/5.00404/CORE -DmDB_Prefix_t=size_t
+ -DmDB_Hash_t=u_int32_t DB_File.c
+ DB_File.xs:101: db.h: No such file or directory
+
+or this:
+
+ LD_RUN_PATH="/lib" cc -o blib/arch/auto/DB_File/DB_File.so -shared
+ -L/usr/local/lib DB_File.o -L/usr/local/lib -ldb
+ ld: cannot open -ldb: No such file or directory
+
+This symptom can imply:
+
+ 1. You don't have Berkeley DB installed on your system at all.
+ Solution: get & install Berkeley DB.
+
+ 2. You do have Berkeley DB installed, but it isn't in a standard place.
+ Solution: Edit config.in and set the LIB and INCLUDE variables to point
+ to the directories where libdb.a and db.h are installed.
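+
+     For example, if Berkeley DB is installed under /usr/local/BerkeleyDB
+     (an illustrative path), the corresponding entries in config.in would
+     look something like this:
+
+         INCLUDE = /usr/local/BerkeleyDB/include
+         LIB     = /usr/local/BerkeleyDB/lib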
+
+
+Undefined symbol db_version
+---------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /usr/bin/perl5.00404 -I./blib/arch -I./blib/lib
+ -I/usr/local/lib/perl5/i586-linux/5.00404 -I/usr/local/lib/perl5 -e 'use
+ Test::Harness qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........Can't load './blib/arch/auto/DB_File/DB_File.so' for
+ module DB_File: ./blib/arch/auto/DB_File/DB_File.so: undefined symbol:
+ db_version at /usr/local/lib/perl5/i586-linux/5.00404/DynaLoader.pm
+ line 166.
+
+ at t/db-btree.t line 21
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ dubious Test returned status 2 (wstat 512, 0x200)
+
+This error usually happens when you have both version 1 and version
+2 of Berkeley DB installed on your system and DB_File attempts to
+build using the db.h for Berkeley DB version 2 and the version 1
+library. Unfortunately the two versions aren't compatible with each
+other. The undefined symbol error is actually caused because Berkeley
+DB version 1 doesn't have the symbol db_version.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+ of db.h and libdb.a that you don't want DB_File to use.
+
+Incompatible versions of db.h and libdb
+---------------------------------------
+
+DB_File seems to have built correctly, but you get an error like this
+when you run the test harness:
+
+ $ make test
+ PERL_DL_NONLAZY=1 /home/paul/perl/install/bin/perl5.00560 -Iblib/arch
+ -Iblib/lib -I/home/paul/perl/install/5.005_60/lib/5.00560/i586-linux
+ -I/home/paul/perl/install/5.005_60/lib/5.00560 -e 'use Test::Harness
+ qw(&runtests $verbose); $verbose=0; runtests @ARGV;' t/*.t
+ t/db-btree..........
+ DB_File needs compatible versions of libdb & db.h
+ you have db.h version 2.3.7 and libdb version 2.7.5
+ BEGIN failed--compilation aborted at t/db-btree.t line 21.
+ ...
+
+Another variation on the theme of having two versions of Berkeley DB on
+your system.
+
+Solution: Setting the LIB & INCLUDE variables in config.in to point to the
+ correct directories can sometimes be enough to fix this
+ problem. If that doesn't work the easiest way to fix the
+ problem is to either delete or temporarily rename the copies
+          of db.h and libdb.a that you don't want DB_File to use.
+ If you are running Linux, please read the Linux Notes section
+ below.
+
+
+Linux Notes
+-----------
+
+Newer versions of Linux (e.g. RedHat 6, SuSe 6) ship with a C library
+that has version 2.x of Berkeley DB linked into it. This makes it
+difficult to build this module with anything other than the version of
+Berkeley DB that shipped with your Linux release. If you do try to use
+a different version of Berkeley DB you will most likely get the error
+described in the "Incompatible versions of db.h and libdb" section of
+this file.
+
+To make matters worse, prior to Perl 5.6.1, the perl binary itself
+*always* included the Berkeley DB library.
+
+If you want to use a newer version of Berkeley DB with this module, the
+easiest solution is to use Perl 5.6.1 (or better) and Berkeley DB 3.x
+(or better).
+
+There are two approaches you can use to get older versions of Perl to
+work with specific versions of Berkeley DB. Both have their advantages
+and disadvantages.
+
+The first approach will only work when you want to build a version of
+Perl older than 5.6.1 along with Berkeley DB 3.x. If you want to use
+Berkeley DB 2.x, you must use the next approach. This approach involves
+rebuilding your existing version of Perl after applying an unofficial
+patch. The "patches" directory in this module's source distribution
+contains a number of patch files. There is one patch file for every
+stable version of Perl since 5.004. Apply the appropriate patch to your
+Perl source tree before re-building and installing Perl from scratch.
+For example, assuming you are in the top-level source directory for
+Perl 5.6.0, the command below will apply the necessary patch. Remember
+to replace the path shown below with one that points to this module's
+patches directory.
+
+ patch -p1 -N </path/to/DB_File/patches/5.6.0
+
+Now rebuild & install perl. You should now have a perl binary that can
+be used to build this module. Follow the instructions in "BUILDING THE
+MODULE", remembering to set the INCLUDE and LIB variables in config.in.
+
+
+The second approach will work with both Berkeley DB 2.x and 3.x.
+Start by building Berkeley DB as a shared library. This is from
+the Berkeley DB build instructions:
+
+ Building Shared Libraries for the GNU GCC compiler
+
+ If you're using gcc and there's no better shared library example for
+ your architecture, the following shared library build procedure will
+ probably work.
+
+ Add the -fpic option to the CFLAGS value in the Makefile.
+
+ Rebuild all of your .o files. This will create a Berkeley DB library
+ that contains .o files with PIC code. To build the shared library,
+ then take the following steps in the library build directory:
+
+ % mkdir tmp
+ % cd tmp
+ % ar xv ../libdb.a
+ % gcc -shared -o libdb.so *.o
+ % mv libdb.so ..
+ % cd ..
+ % rm -rf tmp
+
+ Note, you may have to change the gcc line depending on the
+ requirements of your system.
+
+ The file libdb.so is your shared library
+
+Once you have built libdb.so, you will need to store it somewhere safe.
+
+ cp libdb.so /usr/local/BerkeleyDB/lib
+
+If you now set the LD_PRELOAD environment variable to point to this
+shared library, Perl will use it instead of the version of Berkeley DB
+that shipped with your Linux distribution.
+
+ export LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so
+
+Finally follow the instructions in "BUILDING THE MODULE" to build,
+test and install this module. Don't forget to set the INCLUDE and LIB
+variables in config.in.
+
+Remember, you will need to have the LD_PRELOAD variable set anytime you
+want to use Perl with Berkeley DB. Also note that if you have LD_PRELOAD
+permanently set it will affect ALL commands you execute. This may be a
+problem if you run any commands that access a database created by the
+version of Berkeley DB that shipped with your Linux distribution.
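+
+If you don't want LD_PRELOAD set permanently, you can set it for a
+single command instead, for example:
+
+    LD_PRELOAD=/usr/local/BerkeleyDB/lib/libdb.so perl Makefile.PL
+
+That way the other commands you run are left alone.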
+
+
+Solaris Notes
+-------------
+
+If you are running Solaris 2.5, and you get this error when you run the
+DB_File test harness:
+
+ libc internal error: _rmutex_unlock: rmutex not held.
+
+you probably need to install a Sun patch. It has been reported that
+Sun patch 103187-25 (or later revisions) fixes this problem.
+
+To find out if you have the patch installed, the command "showrev -p"
+will display the patches that are currently installed on your system.
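+
+For example, a command like this should be enough to see whether the
+patch mentioned above is present:
+
+    showrev -p | grep 103187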
+
+
+HP-UX Notes
+-----------
+
+Some people running HP-UX 10 have reported getting an error like this
+when building DB_File with the native HP-UX compiler:
+
+ ld: (Warning) At least one PA 2.0 object file (DB_File.o) was detected.
+ The linked output may not run on a PA 1.x system.
+ ld: Invalid loader fixup for symbol "$000000A5".
+
+If this is the case for you, Berkeley DB needs to be recompiled with
+the +z or +Z option and the resulting library placed in a .sl file. The
+following steps should do the trick:
+
+ 1: Configure the Berkeley DB distribution with the +z or +Z C compiler
+ flag:
+
+ env "CFLAGS=+z" ../dist/configure ...
+
+ 2: Edit the Berkeley DB Makefile and change:
+
+ "libdb= libdb.a" to "libdb= libdb.sl".
+
+
+ 3: Build and install the Berkeley DB distribution as usual.
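+
+Putting these steps together, a typical session looks roughly like
+this (run from your Berkeley DB build directory, adding whatever
+configure options you normally use):
+
+    env "CFLAGS=+z" ../dist/configure
+    # now edit the Makefile as described in step 2
+    make
+    make install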
+
+
+IRIX NOTES
+----------
+
+If you are running IRIX, and want to use Berkeley DB version 1, you can
+get it from http://reality.sgi.com/ariel. It has the patches necessary
+to compile properly on IRIX 5.3.
+
+
+FEEDBACK
+========
+
+How to report a problem with DB_File.
+
+To help me help you, I need the following information:
+
+ 1. The version of Perl and the operating system name and version you
+ are running. The *complete* output from running "perl -V" will
+ tell me all I need to know. Don't edit the output in any way. Note,
+ I want you to run "perl -V" and NOT "perl -v".
+
+ If your perl does not understand the "-V" option it is too old. DB_File
+ needs Perl version 5.004 or better.
+
+ 2. The version of DB_File you have.
+ If you have successfully installed DB_File, this one-liner will
+ tell you:
+
+ perl -e 'use DB_File; print "DB_File ver $DB_File::VERSION\n"'
+
+ If you haven't installed DB_File then search DB_File.pm for a line
+ like this:
+
+ $VERSION = "1.20" ;
+
+ 3. The version of Berkeley DB you are using.
+ If you are using a version older than 1.85, think about upgrading. One
+ point to note if you are considering upgrading Berkeley DB - the
+ file formats for 1.85, 1.86, 2.0, 3.0 & 3.1 are all different.
+
+ If you have successfully installed DB_File, this command will display
+ the version of Berkeley DB it was built with:
+
+ perl -e 'use DB_File; print "Berkeley DB ver $DB_File::db_ver\n"'
+
+ 4. If you are having problems building DB_File, send me a complete log
+ of what happened.
+
+ 5. Now the difficult one. If you think you have found a bug in DB_File
+ and you want me to fix it, you will *greatly* enhance the chances
+ of me being able to track it down by sending me a small
+ self-contained Perl script that illustrates the problem you are
+ encountering. Include a summary of what you think the problem is
+ and a log of what happens when you run the script, in case I can't
+ reproduce your problem on my system. If possible, don't have the
+ script dependent on an existing 20Meg database. If the script you
+ send me can create the database itself then that is preferred.
+
+ I realise that in some cases this is easier said than done, so if
+ you can only reproduce the problem in your existing script, then
+ you can post me that if you want. Just don't expect me to find your
+ problem in a hurry, or at all. :-)
+
+
+CHANGES
+-------
+
+See the Changes file.
+
+Paul Marquess <Paul.Marquess@btinternet.com>
diff --git a/db/perl/DB_File/config.in b/db/perl/DB_File/config.in
new file mode 100644
index 000000000..dfd46bc59
--- /dev/null
+++ b/db/perl/DB_File/config.in
@@ -0,0 +1,99 @@
+# Filename: config.in
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 9th Sept 1997
+# version 1.55
+
+# 1. Where is the file db.h?
+#
+# Change the path below to point to the directory where db.h is
+# installed on your system.
+
+INCLUDE = /usr/local/BerkeleyDB/include
+#INCLUDE = /usr/local/include
+#INCLUDE = /usr/include
+#INCLUDE = ./libraries/3.2.9
+
+# 2. Where is libdb?
+#
+# Change the path below to point to the directory where libdb is
+# installed on your system.
+
+LIB = /usr/local/BerkeleyDB/lib
+#LIB = /usr/local/lib
+#LIB = /usr/lib
+#LIB = ./libraries/3.2.9
+
+# 3. What version of Berkeley DB have you got?
+#
+# If you have version 2.0 or greater, you can skip this question.
+#
+# If you have Berkeley DB 1.78 or greater you shouldn't have to
+# change the definitions for PREFIX and HASH below.
+#
+# For older versions of Berkeley DB change both PREFIX and HASH to int.
+# Versions 1.71, 1.72 and 1.73 are known to need this change.
+#
+# If you don't know what version you have, have a look in the file db.h.
+#
+# Search for the string "DB_VERSION_MAJOR". If it is present, you
+# have Berkeley DB version 2 (or greater).
+#
+# If that didn't work, find the definition of the BTREEINFO typedef.
+# Check the return type from the prefix element. It should look like
+# this in an older copy of db.h:
+#
+# int (*prefix) __P((const DBT *, const DBT *));
+#
+# and like this in a more recent copy:
+#
+# size_t (*prefix) /* prefix function */
+# __P((const DBT *, const DBT *));
+#
+# Change the definition of PREFIX, below, to reflect the return type
+# of the prefix function in your db.h.
+#
+# Now find the definition of the HASHINFO typedef. Check the return
+# type of the hash element. Older versions look like this:
+#
+# int (*hash) __P((const void *, size_t));
+#
+# newer like this:
+#
+# u_int32_t /* hash function */
+# (*hash) __P((const void *, size_t));
+#
+# Change the definition of HASH, below, to reflect the return type of
+# the hash function in your db.h.
+#
+
+PREFIX = size_t
+HASH = u_int32_t
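+
+# For one of the older releases mentioned above (1.71, 1.72 or 1.73)
+# you would use the following settings instead:
+#
+#PREFIX = int
+#HASH = int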
+
+# 4. Is the library called libdb?
+#
+# If you have copies of both 1.x and 2.x Berkeley DB installed on
+# your system it can sometimes be tricky to make sure you are using
+# the correct one. Renaming one (or creating a symbolic link) to
+# include the version number of the library can help.
+#
+# For example, if you have both Berkeley DB 2.3.12 and 1.85 on your
+# system and you want to use the Berkeley DB version 2 library you
+# could rename the version 2 library from libdb.a to libdb-2.3.12.a and
+# change the DBNAME line below to look like this:
+#
+# DBNAME = -ldb-2.3.12
+#
+# That will ensure you are linking the correct version of the DB
+# library.
+#
+# Note: If you are building this module with Win32, -llibdb will be
+# used by default.
+#
+# If you have changed the name of the library, uncomment the line
+# below (by removing the leading #) and edit the line to use the name
+# you have picked.
+
+#DBNAME = -ldb-2.4.10
+
+# end of file config.in
diff --git a/db/perl/DB_File/dbinfo b/db/perl/DB_File/dbinfo
new file mode 100644
index 000000000..5a4df1590
--- /dev/null
+++ b/db/perl/DB_File/dbinfo
@@ -0,0 +1,109 @@
+#!/usr/local/bin/perl
+
+# Name: dbinfo -- identify berkeley DB version used to create
+# a database file
+#
+# Author: Paul Marquess <Paul.Marquess@btinternet.com>
+# Version: 1.03
+# Date 17th September 2000
+#
+# Copyright (c) 1998-2000 Paul Marquess. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+
+# Todo: Print more stats on a db file, e.g. no of records
+# add log/txn/lock files
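+#
+# Usage: dbinfo file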
+
+use strict ;
+
+my %Data =
+ (
+ 0x053162 => {
+ Type => "Btree",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "Unknown (older than 1.71)",
+ 3 => "1.71 -> 1.85, 1.86",
+ 4 => "Unknown",
+ 5 => "2.0.0 -> 2.3.0",
+ 6 => "2.3.1 -> 2.7.7",
+ 7 => "3.0.x",
+ 8 => "3.1.x or greater",
+ }
+ },
+ 0x061561 => {
+ Type => "Hash",
+ Versions =>
+ {
+ 1 => "Unknown (older than 1.71)",
+ 2 => "1.71 -> 1.85",
+ 3 => "1.86",
+ 4 => "2.0.0 -> 2.1.0",
+ 5 => "2.2.6 -> 2.7.7",
+ 6 => "3.0.x",
+ 7 => "3.1.x or greater",
+ }
+ },
+ 0x042253 => {
+ Type => "Queue",
+ Versions =>
+ {
+ 1 => "3.0.x",
+ 2 => "3.1.x",
+ 3 => "3.2.x or greater",
+ }
+ },
+ ) ;
+
+die "Usage: dbinfo file\n" unless @ARGV == 1 ;
+
+print "testing file $ARGV[0]...\n\n" ;
+open (F, "<$ARGV[0]") or die "Cannot open file $ARGV[0]: $!\n" ;
+
+my $buff ;
+read F, $buff, 20 ;
+
+my (@info) = unpack("NNNNN", $buff) ;
+my (@info1) = unpack("VVVVV", $buff) ;
+my ($magic, $version, $endian) ;
+
+if ($Data{$info[0]}) # first try DB 1.x format
+{
+ $magic = $info[0] ;
+ $version = $info[1] ;
+ $endian = "Unknown" ;
+}
+elsif ($Data{$info[3]}) # next DB 2.x big endian
+{
+ $magic = $info[3] ;
+ $version = $info[4] ;
+ $endian = "Big Endian" ;
+}
+elsif ($Data{$info1[3]}) # next DB 2.x little endian
+{
+ $magic = $info1[3] ;
+ $version = $info1[4] ;
+ $endian = "Little Endian" ;
+}
+else
+ { die "not a Berkeley DB database file.\n" }
+
+my $type = $Data{$magic} ;
+$magic = sprintf "%06X", $magic ;
+
+my $ver_string = "Unknown" ;
+$ver_string = $type->{Versions}{$version}
+ if defined $type->{Versions}{$version} ;
+
+print <<EOM ;
+File Type: Berkeley DB $type->{Type} file.
+File Version ID: $version
+Built with Berkeley DB: $ver_string
+Byte Order: $endian
+Magic: $magic
+EOM
+
+close F ;
+
+exit ;
diff --git a/db/perl/DB_File/hints/dynixptx.pl b/db/perl/DB_File/hints/dynixptx.pl
new file mode 100644
index 000000000..bb5ffa56e
--- /dev/null
+++ b/db/perl/DB_File/hints/dynixptx.pl
@@ -0,0 +1,3 @@
+# Need to add an extra '-lc' to the end to work around a DYNIX/ptx bug
+
+$self->{LIBS} = ['-lm -lc'];
diff --git a/db/perl/DB_File/hints/sco.pl b/db/perl/DB_File/hints/sco.pl
new file mode 100644
index 000000000..ff6044094
--- /dev/null
+++ b/db/perl/DB_File/hints/sco.pl
@@ -0,0 +1,2 @@
+# osr5 needs to explicitly link against libc to pull in some static symbols
+$self->{LIBS} = ['-ldb -lc'] if $Config{'osvers'} =~ '3\.2v5\.0\..' ;
diff --git a/db/perl/DB_File/patches/5.004 b/db/perl/DB_File/patches/5.004
new file mode 100644
index 000000000..143ec95af
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004
@@ -0,0 +1,44 @@
+diff perl5.004.orig/Configure perl5.004/Configure
+190a191
+> perllibs=''
+9904a9906,9913
+> : Remove libraries needed only for extensions
+> : The appropriate ext/Foo/Makefile.PL will add them back in, if
+> : necessary.
+> set X `echo " $libs " |
+> sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
+> shift
+> perllibs="$*"
+>
+10372a10382
+> perllibs='$perllibs'
+diff perl5.004.orig/Makefile.SH perl5.004/Makefile.SH
+122c122
+< libs = $libs $cryptlib
+---
+> libs = $perllibs $cryptlib
+Common subdirectories: perl5.004.orig/Porting and perl5.004/Porting
+Common subdirectories: perl5.004.orig/cygwin32 and perl5.004/cygwin32
+Common subdirectories: perl5.004.orig/eg and perl5.004/eg
+Common subdirectories: perl5.004.orig/emacs and perl5.004/emacs
+Common subdirectories: perl5.004.orig/ext and perl5.004/ext
+Common subdirectories: perl5.004.orig/h2pl and perl5.004/h2pl
+Common subdirectories: perl5.004.orig/hints and perl5.004/hints
+Common subdirectories: perl5.004.orig/lib and perl5.004/lib
+diff perl5.004.orig/myconfig perl5.004/myconfig
+38c38
+< libs=$libs
+---
+> libs=$perllibs
+Common subdirectories: perl5.004.orig/os2 and perl5.004/os2
+diff perl5.004.orig/patchlevel.h perl5.004/patchlevel.h
+40a41
+> ,"NODB-1.0 - remove -ldb from core perl binary."
+Common subdirectories: perl5.004.orig/plan9 and perl5.004/plan9
+Common subdirectories: perl5.004.orig/pod and perl5.004/pod
+Common subdirectories: perl5.004.orig/qnx and perl5.004/qnx
+Common subdirectories: perl5.004.orig/t and perl5.004/t
+Common subdirectories: perl5.004.orig/utils and perl5.004/utils
+Common subdirectories: perl5.004.orig/vms and perl5.004/vms
+Common subdirectories: perl5.004.orig/win32 and perl5.004/win32
+Common subdirectories: perl5.004.orig/x2p and perl5.004/x2p
diff --git a/db/perl/DB_File/patches/5.004_01 b/db/perl/DB_File/patches/5.004_01
new file mode 100644
index 000000000..1b05eb4e0
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004_01
@@ -0,0 +1,217 @@
+diff -rc perl5.004_01.orig/Configure perl5.004_01/Configure
+*** perl5.004_01.orig/Configure Wed Jun 11 00:28:03 1997
+--- perl5.004_01/Configure Sun Nov 12 22:12:35 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9907,9912 ****
+--- 9908,9921 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10375,10380 ****
+--- 10384,10390 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_01.orig/Makefile.SH perl5.004_01/Makefile.SH
+*** perl5.004_01.orig/Makefile.SH Thu Jun 12 23:27:56 1997
+--- perl5.004_01/Makefile.SH Sun Nov 12 22:12:35 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Embed.pm perl5.004_01/lib/ExtUtils/Embed.pm
+*** perl5.004_01.orig/lib/ExtUtils/Embed.pm Wed Apr 2 22:12:04 1997
+--- perl5.004_01/lib/ExtUtils/Embed.pm Sun Nov 12 22:12:35 2000
+***************
+*** 170,176 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 170,176 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_01.orig/lib/ExtUtils/Liblist.pm perl5.004_01/lib/ExtUtils/Liblist.pm
+*** perl5.004_01.orig/lib/ExtUtils/Liblist.pm Sat Jun 7 01:19:44 1997
+--- perl5.004_01/lib/ExtUtils/Liblist.pm Sun Nov 12 22:13:27 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $Verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $Verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $Verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm perl5.004_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_01.orig/lib/ExtUtils/MM_Unix.pm Thu Jun 12 22:06:18 1997
+--- perl5.004_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:12:35 2000
+***************
+*** 2137,2143 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2137,2143 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_01.orig/myconfig perl5.004_01/myconfig
+*** perl5.004_01.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_01/myconfig Sun Nov 12 22:12:35 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_01.orig/patchlevel.h perl5.004_01/patchlevel.h
+*** perl5.004_01.orig/patchlevel.h Wed Jun 11 03:06:10 1997
+--- perl5.004_01/patchlevel.h Sun Nov 12 22:12:35 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.004_02 b/db/perl/DB_File/patches/5.004_02
new file mode 100644
index 000000000..238f87379
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004_02
@@ -0,0 +1,217 @@
+diff -rc perl5.004_02.orig/Configure perl5.004_02/Configure
+*** perl5.004_02.orig/Configure Thu Aug 7 15:08:44 1997
+--- perl5.004_02/Configure Sun Nov 12 22:06:24 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_02.orig/Makefile.SH perl5.004_02/Makefile.SH
+*** perl5.004_02.orig/Makefile.SH Thu Aug 7 13:10:53 1997
+--- perl5.004_02/Makefile.SH Sun Nov 12 22:06:24 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Embed.pm perl5.004_02/lib/ExtUtils/Embed.pm
+*** perl5.004_02.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_02/lib/ExtUtils/Embed.pm Sun Nov 12 22:06:24 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_02.orig/lib/ExtUtils/Liblist.pm perl5.004_02/lib/ExtUtils/Liblist.pm
+*** perl5.004_02.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_02/lib/ExtUtils/Liblist.pm Sun Nov 12 22:06:24 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm perl5.004_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_02.orig/lib/ExtUtils/MM_Unix.pm Tue Aug 5 14:28:08 1997
+--- perl5.004_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 22:06:25 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_02.orig/myconfig perl5.004_02/myconfig
+*** perl5.004_02.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_02/myconfig Sun Nov 12 22:06:25 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_02.orig/patchlevel.h perl5.004_02/patchlevel.h
+*** perl5.004_02.orig/patchlevel.h Fri Aug 1 15:07:34 1997
+--- perl5.004_02/patchlevel.h Sun Nov 12 22:06:25 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.004_03 b/db/perl/DB_File/patches/5.004_03
new file mode 100644
index 000000000..06331eac9
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004_03
@@ -0,0 +1,223 @@
+diff -rc perl5.004_03.orig/Configure perl5.004_03/Configure
+*** perl5.004_03.orig/Configure Wed Aug 13 16:09:46 1997
+--- perl5.004_03/Configure Sun Nov 12 21:56:18 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9911,9916 ****
+--- 9912,9925 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10379,10384 ****
+--- 10388,10394 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.004_03: Configure.orig
+diff -rc perl5.004_03.orig/Makefile.SH perl5.004_03/Makefile.SH
+*** perl5.004_03.orig/Makefile.SH Mon Aug 18 19:24:29 1997
+--- perl5.004_03/Makefile.SH Sun Nov 12 21:56:18 2000
+***************
+*** 126,132 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 126,132 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.004_03: Makefile.SH.orig
+diff -rc perl5.004_03.orig/lib/ExtUtils/Embed.pm perl5.004_03/lib/ExtUtils/Embed.pm
+*** perl5.004_03.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_03/lib/ExtUtils/Embed.pm Sun Nov 12 21:56:18 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_03.orig/lib/ExtUtils/Liblist.pm perl5.004_03/lib/ExtUtils/Liblist.pm
+*** perl5.004_03.orig/lib/ExtUtils/Liblist.pm Fri Aug 1 19:36:58 1997
+--- perl5.004_03/lib/ExtUtils/Liblist.pm Sun Nov 12 21:57:17 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ print STDOUT "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 186,196 ****
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{libs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 186,196 ----
+ my($self, $potential_libs, $verbose) = @_;
+
+ # If user did not supply a list, we punt.
+! # (caller should probably use the list in $Config{perllibs})
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 540,546 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 540,546 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.orig
+Only in perl5.004_03/lib/ExtUtils: Liblist.pm.rej
+diff -rc perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm perl5.004_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_03.orig/lib/ExtUtils/MM_Unix.pm Mon Aug 18 19:16:12 1997
+--- perl5.004_03/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:56:19 2000
+***************
+*** 2224,2230 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2224,2230 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.004_03/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.004_03.orig/myconfig perl5.004_03/myconfig
+*** perl5.004_03.orig/myconfig Sat Dec 21 01:13:20 1996
+--- perl5.004_03/myconfig Sun Nov 12 21:56:19 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_03.orig/patchlevel.h perl5.004_03/patchlevel.h
+*** perl5.004_03.orig/patchlevel.h Wed Aug 13 11:42:01 1997
+--- perl5.004_03/patchlevel.h Sun Nov 12 21:56:19 2000
+***************
+*** 38,43 ****
+--- 38,44 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
+Only in perl5.004_03: patchlevel.h.orig
diff --git a/db/perl/DB_File/patches/5.004_04 b/db/perl/DB_File/patches/5.004_04
new file mode 100644
index 000000000..a227dc700
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004_04
@@ -0,0 +1,209 @@
+diff -rc perl5.004_04.orig/Configure perl5.004_04/Configure
+*** perl5.004_04.orig/Configure Fri Oct 3 18:57:39 1997
+--- perl5.004_04/Configure Sun Nov 12 21:50:51 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 9910,9915 ****
+--- 9911,9924 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10378,10383 ****
+--- 10387,10393 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_04.orig/Makefile.SH perl5.004_04/Makefile.SH
+*** perl5.004_04.orig/Makefile.SH Wed Oct 15 10:33:16 1997
+--- perl5.004_04/Makefile.SH Sun Nov 12 21:50:51 2000
+***************
+*** 129,135 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 129,135 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Embed.pm perl5.004_04/lib/ExtUtils/Embed.pm
+*** perl5.004_04.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_04/lib/ExtUtils/Embed.pm Sun Nov 12 21:50:51 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_04.orig/lib/ExtUtils/Liblist.pm perl5.004_04/lib/ExtUtils/Liblist.pm
+*** perl5.004_04.orig/lib/ExtUtils/Liblist.pm Tue Sep 9 17:41:32 1997
+--- perl5.004_04/lib/ExtUtils/Liblist.pm Sun Nov 12 21:51:33 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 189,195 ****
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+--- 189,195 ----
+ return ("", "", "", "") unless $potential_libs;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my($libpth) = $Config{'libpth'};
+ my($libext) = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 539,545 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 539,545 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm perl5.004_04/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_04.orig/lib/ExtUtils/MM_Unix.pm Wed Oct 8 14:13:51 1997
+--- perl5.004_04/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:50:51 2000
+***************
+*** 2229,2235 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2229,2235 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_04.orig/myconfig perl5.004_04/myconfig
+*** perl5.004_04.orig/myconfig Mon Oct 6 18:26:49 1997
+--- perl5.004_04/myconfig Sun Nov 12 21:50:51 2000
+***************
+*** 35,41 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 35,41 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_04.orig/patchlevel.h perl5.004_04/patchlevel.h
+*** perl5.004_04.orig/patchlevel.h Wed Oct 15 10:55:19 1997
+--- perl5.004_04/patchlevel.h Sun Nov 12 21:50:51 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.004_05 b/db/perl/DB_File/patches/5.004_05
new file mode 100644
index 000000000..51c8bf350
--- /dev/null
+++ b/db/perl/DB_File/patches/5.004_05
@@ -0,0 +1,209 @@
+diff -rc perl5.004_05.orig/Configure perl5.004_05/Configure
+*** perl5.004_05.orig/Configure Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Configure Sun Nov 12 21:36:25 2000
+***************
+*** 188,193 ****
+--- 188,194 ----
+ mv=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 10164,10169 ****
+--- 10165,10178 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 10648,10653 ****
+--- 10657,10663 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.004_05.orig/Makefile.SH perl5.004_05/Makefile.SH
+*** perl5.004_05.orig/Makefile.SH Thu Jan 6 22:05:49 2000
+--- perl5.004_05/Makefile.SH Sun Nov 12 21:36:25 2000
+***************
+*** 151,157 ****
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 151,157 ----
+ ext = \$(dynamic_ext) \$(static_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Embed.pm perl5.004_05/lib/ExtUtils/Embed.pm
+*** perl5.004_05.orig/lib/ExtUtils/Embed.pm Fri Aug 1 15:08:44 1997
+--- perl5.004_05/lib/ExtUtils/Embed.pm Sun Nov 12 21:36:25 2000
+***************
+*** 178,184 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 178,184 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.004_05.orig/lib/ExtUtils/Liblist.pm perl5.004_05/lib/ExtUtils/Liblist.pm
+*** perl5.004_05.orig/lib/ExtUtils/Liblist.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/Liblist.pm Sun Nov 12 21:45:31 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 590,596 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 590,596 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm perl5.004_05/lib/ExtUtils/MM_Unix.pm
+*** perl5.004_05.orig/lib/ExtUtils/MM_Unix.pm Thu Jan 6 22:05:54 2000
+--- perl5.004_05/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:36:25 2000
+***************
+*** 2246,2252 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2246,2252 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.004_05.orig/myconfig perl5.004_05/myconfig
+*** perl5.004_05.orig/myconfig Thu Jan 6 22:05:55 2000
+--- perl5.004_05/myconfig Sun Nov 12 21:43:54 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so
+ useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+diff -rc perl5.004_05.orig/patchlevel.h perl5.004_05/patchlevel.h
+*** perl5.004_05.orig/patchlevel.h Thu Jan 6 22:05:48 2000
+--- perl5.004_05/patchlevel.h Sun Nov 12 21:36:25 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ /* The following line and terminating '};' are read by perlbug.PL. Don't alter. */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.005 b/db/perl/DB_File/patches/5.005
new file mode 100644
index 000000000..effee3e82
--- /dev/null
+++ b/db/perl/DB_File/patches/5.005
@@ -0,0 +1,209 @@
+diff -rc perl5.005.orig/Configure perl5.005/Configure
+*** perl5.005.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005/Configure Sun Nov 12 21:30:40 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005.orig/Makefile.SH perl5.005/Makefile.SH
+*** perl5.005.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005/Makefile.SH Sun Nov 12 21:30:40 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005.orig/lib/ExtUtils/Embed.pm perl5.005/lib/ExtUtils/Embed.pm
+*** perl5.005.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005/lib/ExtUtils/Embed.pm Sun Nov 12 21:30:40 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005.orig/lib/ExtUtils/Liblist.pm perl5.005/lib/ExtUtils/Liblist.pm
+*** perl5.005.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005/lib/ExtUtils/Liblist.pm Sun Nov 12 21:30:40 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005.orig/lib/ExtUtils/MM_Unix.pm perl5.005/lib/ExtUtils/MM_Unix.pm
+*** perl5.005.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005/lib/ExtUtils/MM_Unix.pm Sun Nov 12 21:30:41 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005.orig/myconfig perl5.005/myconfig
+*** perl5.005.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005/myconfig Sun Nov 12 21:30:41 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005.orig/patchlevel.h perl5.005/patchlevel.h
+*** perl5.005.orig/patchlevel.h Wed Jul 22 19:22:01 1998
+--- perl5.005/patchlevel.h Sun Nov 12 21:30:41 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.005_01 b/db/perl/DB_File/patches/5.005_01
new file mode 100644
index 000000000..2a05dd545
--- /dev/null
+++ b/db/perl/DB_File/patches/5.005_01
@@ -0,0 +1,209 @@
+diff -rc perl5.005_01.orig/Configure perl5.005_01/Configure
+*** perl5.005_01.orig/Configure Wed Jul 15 08:05:44 1998
+--- perl5.005_01/Configure Sun Nov 12 20:55:58 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11279,11284 ****
+--- 11280,11293 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11804,11809 ****
+--- 11813,11819 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_01.orig/Makefile.SH perl5.005_01/Makefile.SH
+*** perl5.005_01.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_01/Makefile.SH Sun Nov 12 20:55:58 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Embed.pm perl5.005_01/lib/ExtUtils/Embed.pm
+*** perl5.005_01.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_01/lib/ExtUtils/Embed.pm Sun Nov 12 20:55:58 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_01.orig/lib/ExtUtils/Liblist.pm perl5.005_01/lib/ExtUtils/Liblist.pm
+*** perl5.005_01.orig/lib/ExtUtils/Liblist.pm Wed Jul 22 07:09:42 1998
+--- perl5.005_01/lib/ExtUtils/Liblist.pm Sun Nov 12 20:55:58 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 290,296 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 290,296 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 598,604 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 598,604 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+diff -rc perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm perl5.005_01/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_01.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_01/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:55:58 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -rc perl5.005_01.orig/myconfig perl5.005_01/myconfig
+*** perl5.005_01.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_01/myconfig Sun Nov 12 20:55:58 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_01.orig/patchlevel.h perl5.005_01/patchlevel.h
+*** perl5.005_01.orig/patchlevel.h Mon Jan 3 11:07:45 2000
+--- perl5.005_01/patchlevel.h Sun Nov 12 20:55:58 2000
+***************
+*** 39,44 ****
+--- 39,45 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.005_02 b/db/perl/DB_File/patches/5.005_02
new file mode 100644
index 000000000..5dd57ddc0
--- /dev/null
+++ b/db/perl/DB_File/patches/5.005_02
@@ -0,0 +1,264 @@
+diff -rc perl5.005_02.orig/Configure perl5.005_02/Configure
+*** perl5.005_02.orig/Configure Mon Jan 3 11:12:20 2000
+--- perl5.005_02/Configure Sun Nov 12 20:50:51 2000
+***************
+*** 234,239 ****
+--- 234,240 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11334,11339 ****
+--- 11335,11348 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 11859,11864 ****
+--- 11868,11874 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+Only in perl5.005_02: Configure.orig
+diff -rc perl5.005_02.orig/Makefile.SH perl5.005_02/Makefile.SH
+*** perl5.005_02.orig/Makefile.SH Sun Jul 19 08:06:35 1998
+--- perl5.005_02/Makefile.SH Sun Nov 12 20:50:51 2000
+***************
+*** 150,156 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 150,156 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+Only in perl5.005_02: Makefile.SH.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/Embed.pm perl5.005_02/lib/ExtUtils/Embed.pm
+*** perl5.005_02.orig/lib/ExtUtils/Embed.pm Wed Jul 22 07:45:02 1998
+--- perl5.005_02/lib/ExtUtils/Embed.pm Sun Nov 12 20:50:51 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_02.orig/lib/ExtUtils/Liblist.pm perl5.005_02/lib/ExtUtils/Liblist.pm
+*** perl5.005_02.orig/lib/ExtUtils/Liblist.pm Mon Jan 3 11:12:21 2000
+--- perl5.005_02/lib/ExtUtils/Liblist.pm Sun Nov 12 20:50:51 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 333,339 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 333,339 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 623,629 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+--- 623,629 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>
+ as well as in C<$Config{libpth}>. For each library that is found, a
+***************
+*** 666,672 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 666,672 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 676,682 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 676,682 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+Only in perl5.005_02/lib/ExtUtils: Liblist.pm.orig
+diff -rc perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm perl5.005_02/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_02.orig/lib/ExtUtils/MM_Unix.pm Tue Jul 14 04:39:12 1998
+--- perl5.005_02/lib/ExtUtils/MM_Unix.pm Sun Nov 12 20:50:51 2000
+***************
+*** 2281,2287 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2281,2287 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+Only in perl5.005_02/lib/ExtUtils: MM_Unix.pm.orig
+diff -rc perl5.005_02.orig/myconfig perl5.005_02/myconfig
+*** perl5.005_02.orig/myconfig Fri Apr 3 01:20:35 1998
+--- perl5.005_02/myconfig Sun Nov 12 20:50:51 2000
+***************
+*** 34,40 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 34,40 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -rc perl5.005_02.orig/patchlevel.h perl5.005_02/patchlevel.h
+*** perl5.005_02.orig/patchlevel.h Mon Jan 3 11:12:19 2000
+--- perl5.005_02/patchlevel.h Sun Nov 12 20:50:51 2000
+***************
+*** 40,45 ****
+--- 40,46 ----
+ */
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/patches/5.005_03 b/db/perl/DB_File/patches/5.005_03
new file mode 100644
index 000000000..115f9f5b9
--- /dev/null
+++ b/db/perl/DB_File/patches/5.005_03
@@ -0,0 +1,250 @@
+diff -rc perl5.005_03.orig/Configure perl5.005_03/Configure
+*** perl5.005_03.orig/Configure Sun Mar 28 17:12:57 1999
+--- perl5.005_03/Configure Sun Sep 17 22:19:16 2000
+***************
+*** 208,213 ****
+--- 208,214 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 11642,11647 ****
+--- 11643,11656 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 12183,12188 ****
+--- 12192,12198 ----
+ patchlevel='$patchlevel'
+ path_sep='$path_sep'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -rc perl5.005_03.orig/Makefile.SH perl5.005_03/Makefile.SH
+*** perl5.005_03.orig/Makefile.SH Thu Mar 4 02:35:25 1999
+--- perl5.005_03/Makefile.SH Sun Sep 17 22:21:01 2000
+***************
+*** 58,67 ****
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $libs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $libs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+--- 58,67 ----
+ shrpldflags="-H512 -T512 -bhalt:4 -bM:SRE -bE:perl.exp"
+ case "$osvers" in
+ 3*)
+! shrpldflags="$shrpldflags -e _nostart $ldflags $perllibs $cryptlib"
+ ;;
+ *)
+! shrpldflags="$shrpldflags -b noentry $ldflags $perllibs $cryptlib"
+ ;;
+ esac
+ aixinstdir=`pwd | sed 's/\/UU$//'`
+***************
+*** 155,161 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 155,161 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Embed.pm perl5.005_03/lib/ExtUtils/Embed.pm
+*** perl5.005_03.orig/lib/ExtUtils/Embed.pm Wed Jan 6 02:17:50 1999
+--- perl5.005_03/lib/ExtUtils/Embed.pm Sun Sep 17 22:19:16 2000
+***************
+*** 194,200 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 194,200 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/Liblist.pm perl5.005_03/lib/ExtUtils/Liblist.pm
+*** perl5.005_03.orig/lib/ExtUtils/Liblist.pm Wed Jan 6 02:17:47 1999
+--- perl5.005_03/lib/ExtUtils/Liblist.pm Sun Sep 17 22:19:16 2000
+***************
+*** 16,33 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 16,33 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 196,202 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 196,202 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 336,342 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 336,342 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 626,632 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 626,632 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 670,676 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 670,676 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 680,686 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 680,686 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -rc perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm perl5.005_03/lib/ExtUtils/MM_Unix.pm
+*** perl5.005_03.orig/lib/ExtUtils/MM_Unix.pm Fri Mar 5 00:34:20 1999
+--- perl5.005_03/lib/ExtUtils/MM_Unix.pm Sun Sep 17 22:19:16 2000
+***************
+*** 2284,2290 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2284,2290 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
diff --git a/db/perl/DB_File/patches/5.6.0 b/db/perl/DB_File/patches/5.6.0
new file mode 100644
index 000000000..1f9b3b620
--- /dev/null
+++ b/db/perl/DB_File/patches/5.6.0
@@ -0,0 +1,294 @@
+diff -cr perl-5.6.0.orig/Configure perl-5.6.0/Configure
+*** perl-5.6.0.orig/Configure Wed Mar 22 20:36:37 2000
+--- perl-5.6.0/Configure Sun Sep 17 23:40:15 2000
+***************
+*** 217,222 ****
+--- 217,223 ----
+ nm=''
+ nroff=''
+ perl=''
++ perllibs=''
+ pg=''
+ pmake=''
+ pr=''
+***************
+*** 14971,14976 ****
+--- 14972,14985 ----
+ shift
+ extensions="$*"
+
++ : Remove libraries needed only for extensions
++ : The appropriate ext/Foo/Makefile.PL will add them back in, if
++ : necessary.
++ set X `echo " $libs " |
++ sed -e 's@ -lndbm @ @' -e 's@ -lgdbm @ @' -e 's@ -ldbm @ @' -e 's@ -ldb @ @'`
++ shift
++ perllibs="$*"
++
+ : Remove build directory name from cppstdin so it can be used from
+ : either the present location or the final installed location.
+ echo " "
+***************
+*** 15640,15645 ****
+--- 15649,15655 ----
+ path_sep='$path_sep'
+ perl5='$perl5'
+ perl='$perl'
++ perllibs='$perllibs'
+ perladmin='$perladmin'
+ perlpath='$perlpath'
+ pg='$pg'
+diff -cr perl-5.6.0.orig/Makefile.SH perl-5.6.0/Makefile.SH
+*** perl-5.6.0.orig/Makefile.SH Sat Mar 11 16:05:24 2000
+--- perl-5.6.0/Makefile.SH Sun Sep 17 23:40:15 2000
+***************
+*** 70,76 ****
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $libs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+--- 70,76 ----
+ *) shrpldflags="$shrpldflags -b noentry"
+ ;;
+ esac
+! shrpldflags="$shrpldflags $ldflags $perllibs $cryptlib"
+ linklibperl="-L $archlibexp/CORE -L `pwd | sed 's/\/UU$//'` -lperl"
+ ;;
+ hpux*)
+***************
+*** 176,182 ****
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $libs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+--- 176,182 ----
+ ext = \$(dynamic_ext) \$(static_ext) \$(nonxs_ext)
+ DYNALOADER = lib/auto/DynaLoader/DynaLoader\$(LIB_EXT)
+
+! libs = $perllibs $cryptlib
+
+ public = perl $suidperl utilities translators
+
+***************
+*** 333,339 ****
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $libs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+--- 333,339 ----
+ case "$osname" in
+ aix)
+ $spitshell >>Makefile <<!GROK!THIS!
+! LIBS = $perllibs
+ # In AIX we need to change this for building Perl itself from
+ # its earlier definition (which is for building external
+ # extensions *after* Perl has been built and installed)
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Embed.pm perl-5.6.0/lib/ExtUtils/Embed.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Embed.pm Sun Jan 23 12:08:32 2000
+--- perl-5.6.0/lib/ExtUtils/Embed.pm Sun Sep 17 23:40:15 2000
+***************
+*** 193,199 ****
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{libs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+--- 193,199 ----
+ @path = $path ? split(/:/, $path) : @INC;
+
+ push(@potential_libs, @link_args) if scalar @link_args;
+! push(@potential_libs, $Config{perllibs}) if defined $std;
+
+ push(@mods, static_ext()) if $std;
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/Liblist.pm perl-5.6.0/lib/ExtUtils/Liblist.pm
+*** perl-5.6.0.orig/lib/ExtUtils/Liblist.pm Wed Mar 22 16:16:31 2000
+--- perl-5.6.0/lib/ExtUtils/Liblist.pm Sun Sep 17 23:40:15 2000
+***************
+*** 17,34 ****
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{libs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{libs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'libs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+--- 17,34 ----
+
+ sub _unix_os2_ext {
+ my($self,$potential_libs, $verbose) = @_;
+! if ($^O =~ 'os2' and $Config{perllibs}) {
+ # Dynamic libraries are not transitive, so we may need including
+ # the libraries linked against perl.dll again.
+
+ $potential_libs .= " " if $potential_libs;
+! $potential_libs .= $Config{perllibs};
+ }
+ return ("", "", "", "") unless $potential_libs;
+ warn "Potential libraries are '$potential_libs':\n" if $verbose;
+
+ my($so) = $Config{'so'};
+! my($libs) = $Config{'perllibs'};
+ my $Config_libext = $Config{lib_ext} || ".a";
+
+
+***************
+*** 198,204 ****
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'libs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+--- 198,204 ----
+ my $BC = 1 if $cc =~ /^bcc/i;
+ my $GC = 1 if $cc =~ /^gcc/i;
+ my $so = $Config{'so'};
+! my $libs = $Config{'perllibs'};
+ my $libpth = $Config{'libpth'};
+ my $libext = $Config{'lib_ext'} || ".lib";
+
+***************
+*** 338,344 ****
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+--- 338,344 ----
+ $self->{CCFLAS} || $Config{'ccflags'};
+ @crtls = ( ($dbgqual =~ m-/Debug-i ? $Config{'dbgprefix'} : '')
+ . 'PerlShr/Share' );
+! push(@crtls, grep { not /\(/ } split /\s+/, $Config{'perllibs'});
+ push(@crtls, grep { not /\(/ } split /\s+/, $Config{'libc'});
+ # In general, we pass through the basic libraries from %Config unchanged.
+ # The one exception is that if we're building in the Perl source tree, and
+***************
+*** 624,630 ****
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{libs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+--- 624,630 ----
+ =item *
+
+ If C<$potential_libs> is empty, the return value will be empty.
+! Otherwise, the libraries specified by C<$Config{perllibs}> (see Config.pm)
+ will be appended to the list of C<$potential_libs>. The libraries
+ will be searched for in the directories specified in C<$potential_libs>,
+ C<$Config{libpth}>, and in C<$Config{installarchlib}/CORE>.
+***************
+*** 668,674 ****
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{libs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+--- 668,674 ----
+ alphanumeric characters are treated as flags. Unknown flags will be ignored.
+
+ An entry that matches C</:nodefault/i> disables the appending of default
+! libraries found in C<$Config{perllibs}> (this should be only needed very rarely).
+
+ An entry that matches C</:nosearch/i> disables all searching for
+ the libraries specified after it. Translation of C<-Lfoo> and
+***************
+*** 678,684 ****
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{libs}>.
+
+ =item *
+
+--- 678,684 ----
+
+ An entry that matches C</:search/i> reenables searching for
+ the libraries specified after it. You can put it at the end to
+! enable searching for default libraries specified by C<$Config{perllibs}>.
+
+ =item *
+
+diff -cr perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm perl-5.6.0/lib/ExtUtils/MM_Unix.pm
+*** perl-5.6.0.orig/lib/ExtUtils/MM_Unix.pm Thu Mar 2 17:52:52 2000
+--- perl-5.6.0/lib/ExtUtils/MM_Unix.pm Sun Sep 17 23:40:15 2000
+***************
+*** 2450,2456 ****
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{libs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+--- 2450,2456 ----
+ MAP_STATIC = ",
+ join(" \\\n\t", reverse sort keys %static), "
+
+! MAP_PRELIBS = $Config::Config{perllibs} $Config::Config{cryptlib}
+ ";
+
+ if (defined $libperl) {
+diff -cr perl-5.6.0.orig/myconfig.SH perl-5.6.0/myconfig.SH
+*** perl-5.6.0.orig/myconfig.SH Sat Feb 26 06:34:49 2000
+--- perl-5.6.0/myconfig.SH Sun Sep 17 23:41:17 2000
+***************
+*** 48,54 ****
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$libs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+--- 48,54 ----
+ Linker and Libraries:
+ ld='$ld', ldflags ='$ldflags'
+ libpth=$libpth
+! libs=$perllibs
+ libc=$libc, so=$so, useshrplib=$useshrplib, libperl=$libperl
+ Dynamic Linking:
+ dlsrc=$dlsrc, dlext=$dlext, d_dlsymun=$d_dlsymun, ccdlflags='$ccdlflags'
+diff -cr perl-5.6.0.orig/patchlevel.h perl-5.6.0/patchlevel.h
+*** perl-5.6.0.orig/patchlevel.h Wed Mar 22 20:23:11 2000
+--- perl-5.6.0/patchlevel.h Sun Sep 17 23:40:15 2000
+***************
+*** 70,75 ****
+--- 70,76 ----
+ #if !defined(PERL_PATCHLEVEL_H_IMPLICIT) && !defined(LOCAL_PATCH_COUNT)
+ static char *local_patches[] = {
+ NULL
++ ,"NODB-1.0 - remove -ldb from core perl binary."
+ ,NULL
+ };
+
diff --git a/db/perl/DB_File/t/db-btree.t b/db/perl/DB_File/t/db-btree.t
new file mode 100644
index 000000000..326b8d9ad
--- /dev/null
+++ b/db/perl/DB_File/t/db-btree.t
@@ -0,0 +1,1307 @@
+#!./perl -w
+
+use warnings;
+use strict;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..157\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+
+print "1..157\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+sub lexical
+{
+ my(@a) = unpack ("C*", $a) ;
+ my(@b) = unpack ("C*", $b) ;
+
+ my $len = (@a > @b ? @b : @a) ;
+ my $i = 0 ;
+
+ foreach $i ( 0 .. $len -1) {
+ return $a[$i] - $b[$i] if $a[$i] != $b[$i] ;
+ }
+
+ return @a - @b ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ #local $/ = undef unless wantarray ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my @result = <CAT>;
+ close(CAT);
+ wantarray ? @result : join("", @result) ;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ #local $/ = undef unless wantarray ;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my @result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ wantarray ? @result : join("", @result) ;
+}
+
+
+my $db185mode = ($DB_File::db_version == 1 && ! $DB_File::db_185_compat) ;
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
+
+my $Dfile = "dbbtree.tmp";
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to BTREEINFO
+
+my $dbh = new DB_File::BTREEINFO ;
+ok(1, ! defined $dbh->{flags}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{lorder}) ;
+ok(5, ! defined $dbh->{minkeypage}) ;
+ok(6, ! defined $dbh->{maxkeypage}) ;
+ok(7, ! defined $dbh->{compare}) ;
+ok(8, ! defined $dbh->{prefix}) ;
+
+$dbh->{flags} = 3000 ;
+ok(9, $dbh->{flags} == 3000) ;
+
+$dbh->{cachesize} = 9000 ;
+ok(10, $dbh->{cachesize} == 9000);
+
+$dbh->{psize} = 400 ;
+ok(11, $dbh->{psize} == 400) ;
+
+$dbh->{lorder} = 65 ;
+ok(12, $dbh->{lorder} == 65) ;
+
+$dbh->{minkeypage} = 123 ;
+ok(13, $dbh->{minkeypage} == 123) ;
+
+$dbh->{maxkeypage} = 1234 ;
+ok(14, $dbh->{maxkeypage} == 1234 );
+
+$dbh->{compare} = 1234 ;
+ok(15, $dbh->{compare} == 1234) ;
+
+$dbh->{prefix} = 1234 ;
+ok(16, $dbh->{prefix} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(17, $@ =~ /^DB_File::BTREEINFO::STORE - Unknown element 'fred' at/ ) ;
+eval 'my $q = $dbh->{fred}' ;
+ok(18, $@ =~ /^DB_File::BTREEINFO::FETCH - Unknown element 'fred' at/ ) ;
+
+# Now check the interface to BTREE
+
+my ($X, %h) ;
+ok(19, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE )) ;
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(20, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $^O eq 'amigaos' || $^O eq 'MSWin32' || $^O eq 'NetWare');
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(21, !$i ) ;
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(22, $h{'abc'} eq 'ABC' );
+ok(23, ! defined $h{'jimmy'} ) ;
+ok(24, ! exists $h{'jimmy'} ) ;
+ok(25, defined $h{'abc'} ) ;
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+# tie to the same file again
+ok(26, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE)) ;
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(27, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(28, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(29, $#keys == 31) ;
+
+#Check that the keys can be retrieved in order
+my @b = keys %h ;
+my @c = sort lexical @b ;
+ok(30, ArrayCompare(\@b, \@c)) ;
+
+$h{'foo'} = '';
+ok(31, $h{'foo'} eq '' ) ;
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(32, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(33, $ok);
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(34, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(35, join(':',200..400) eq join(':',@foo) );
+
+# Now check all the non-tie specific stuff
+
+
+# Check R_NOOVERWRITE flag will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(36, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(37, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(38, $status == 0 );
+
+#check that previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(39, $status == 0 );
+ok(40, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(41, $status == 0 );
+if ($null_keys_allowed) {
+ $status = $X->del('') ;
+} else {
+ $status = 0 ;
+}
+ok(42, $status == 0 );
+
+# Make sure that the key deleted, cannot be retrieved
+ok(43, ! defined $h{'q'}) ;
+ok(44, ! defined $h{''}) ;
+
+undef $X ;
+untie %h ;
+
+ok(45, $X = tie(%h, 'DB_File',$Dfile, O_RDWR, 0640, $DB_BTREE ));
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(46, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(47, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(48, $status == 0 );
+ok(49, $value eq 'A' );
+
+# seq
+# ###
+
+# use seq to find an approximate match
+$key = 'ke' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(50, $status == 0 );
+ok(51, $key eq 'key' );
+ok(52, $value eq 'value' );
+
+# seq when the key does not match
+$key = 'zzz' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(53, $status == 1 );
+
+
+# use seq to set the cursor, then delete the record @ the cursor.
+
+$key = 'x' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(54, $status == 0 );
+ok(55, $key eq 'x' );
+ok(56, $value eq 'X' );
+$status = $X->del(0, R_CURSOR) ;
+ok(57, $status == 0 );
+$status = $X->get('x', $value) ;
+ok(58, $status == 1 );
+
+# ditto, but use put to replace the key/value pair.
+$key = 'y' ;
+$value = '' ;
+$status = $X->seq($key, $value, R_CURSOR) ;
+ok(59, $status == 0 );
+ok(60, $key eq 'y' );
+ok(61, $value eq 'Y' );
+
+$key = "replace key" ;
+$value = "replace value" ;
+$status = $X->put($key, $value, R_CURSOR) ;
+ok(62, $status == 0 );
+ok(63, $key eq 'replace key' );
+ok(64, $value eq 'replace value' );
+$status = $X->get('y', $value) ;
+ok(65, 1) ; # hard-wire to always pass. the previous test ($status == 1)
+ # only worked because of a bug in 1.85/6
+
+# use seq to walk forwards through a file
+
+$status = $X->seq($key, $value, R_FIRST) ;
+ok(66, $status == 0 );
+my $previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_NEXT)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == 1 ;
+}
+
+ok(67, $status == 1 );
+ok(68, $ok == 1 );
+
+# use seq to walk backwards through a file
+$status = $X->seq($key, $value, R_LAST) ;
+ok(69, $status == 0 );
+$previous = $key ;
+
+$ok = 1 ;
+while (($status = $X->seq($key, $value, R_PREV)) == 0)
+{
+ ($ok = 0), last if ($previous cmp $key) == -1 ;
+ #print "key = [$key] value = [$value]\n" ;
+}
+
+ok(70, $status == 1 );
+ok(71, $ok == 1 );
+
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(72, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(73, $status != 0 );
+
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# Now try an in memory file
+my $Y;
+ok(74, $Y = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_BTREE ));
+
+# fd with an in memory file should return failure
+$status = $Y->fd ;
+ok(75, $status == -1 );
+
+
+undef $Y ;
+untie %h ;
+
+# Duplicate keys
+my $bt = new DB_File::BTREEINFO ;
+$bt->{flags} = R_DUP ;
+my ($YY, %hh);
+ok(76, $YY = tie(%hh, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $bt )) ;
+
+$hh{'Wall'} = 'Larry' ;
+$hh{'Wall'} = 'Stone' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key
+$hh{'Wall'} = 'Brick' ; # Note the duplicate key and value
+$hh{'Smith'} = 'John' ;
+$hh{'mouse'} = 'mickey' ;
+
+# first work in scalar context
+ok(77, scalar $YY->get_dup('Unknown') == 0 );
+ok(78, scalar $YY->get_dup('Smith') == 1 );
+ok(79, scalar $YY->get_dup('Wall') == 4 );
+
+# now in list context
+my @unknown = $YY->get_dup('Unknown') ;
+ok(80, "@unknown" eq "" );
+
+my @smith = $YY->get_dup('Smith') ;
+ok(81, "@smith" eq "John" );
+
+{
+my @wall = $YY->get_dup('Wall') ;
+my %wall ;
+@wall{@wall} = @wall ;
+ok(82, (@wall == 4 && $wall{'Larry'} && $wall{'Stone'} && $wall{'Brick'}) );
+}
+
+# hash
+my %unknown = $YY->get_dup('Unknown', 1) ;
+ok(83, keys %unknown == 0 );
+
+my %smith = $YY->get_dup('Smith', 1) ;
+ok(84, keys %smith == 1 && $smith{'John'}) ;
+
+my %wall = $YY->get_dup('Wall', 1) ;
+ok(85, keys %wall == 3 && $wall{'Larry'} == 1 && $wall{'Stone'} == 1
+ && $wall{'Brick'} == 2);
+
+undef $YY ;
+untie %hh ;
+unlink $Dfile;
+
+
+# test multiple callbacks
+my $Dfile1 = "btree1" ;
+my $Dfile2 = "btree2" ;
+my $Dfile3 = "btree3" ;
+
+my $dbh1 = new DB_File::BTREEINFO ;
+$dbh1->{compare} = sub {
+ no warnings 'numeric' ;
+ $_[0] <=> $_[1] } ;
+
+my $dbh2 = new DB_File::BTREEINFO ;
+$dbh2->{compare} = sub { $_[0] cmp $_[1] } ;
+
+my $dbh3 = new DB_File::BTREEINFO ;
+$dbh3->{compare} = sub { length $_[0] <=> length $_[1] } ;
+
+
+my (%g, %k);
+tie(%h, 'DB_File',$Dfile1, O_RDWR|O_CREAT, 0640, $dbh1 ) ;
+tie(%g, 'DB_File',$Dfile2, O_RDWR|O_CREAT, 0640, $dbh2 ) ;
+tie(%k, 'DB_File',$Dfile3, O_RDWR|O_CREAT, 0640, $dbh3 ) ;
+
+my @Keys = qw( 0123 12 -1234 9 987654321 def ) ;
+my (@srt_1, @srt_2, @srt_3);
+{
+ no warnings 'numeric' ;
+ @srt_1 = sort { $a <=> $b } @Keys ;
+}
+@srt_2 = sort { $a cmp $b } @Keys ;
+@srt_3 = sort { length $a <=> length $b } @Keys ;
+
+foreach (@Keys) {
+ $h{$_} = 1 ;
+ $g{$_} = 1 ;
+ $k{$_} = 1 ;
+}
+
+sub ArrayCompare
+{
+ my($a, $b) = @_ ;
+
+ return 0 if @$a != @$b ;
+
+ foreach (1 .. length @$a)
+ {
+ return 0 unless $$a[$_] eq $$b[$_] ;
+ }
+
+ 1 ;
+}
+
+ok(86, ArrayCompare (\@srt_1, [keys %h]) );
+ok(87, ArrayCompare (\@srt_2, [keys %g]) );
+ok(88, ArrayCompare (\@srt_3, [keys %k]) );
+
+untie %h ;
+untie %g ;
+untie %k ;
+unlink $Dfile1, $Dfile2, $Dfile3 ;
+
+# clear
+# #####
+
+ok(89, tie(%h, 'DB_File', $Dfile1, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(90, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(91, $i == 0);
+
+untie %h ;
+unlink $Dfile1 ;
+
+{
+ # check that attempting to tie an array to a DB_BTREE will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE ; } ;
+ ok(92, $@ =~ /^DB_File can only tie an associative array to a DB_BTREE database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(93, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbbtree.tmp", O_RDWR|O_CREAT, 0640, $DB_BTREE );
+ ' ;
+
+ main::ok(94, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(95, $@ eq "") ;
+ main::ok(96, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(97, $@ eq "") ;
+ main::ok(98, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(99, $@ eq "" ) ;
+ main::ok(100, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(101, $@ eq "") ;
+ main::ok(102, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbbtree.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(103, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(104, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(105, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(106, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(107, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(108, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(109, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(110, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(111, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(112, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(113, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(114, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(115, $h{"fred"} eq "joe");
+ ok(116, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(117, $db->FIRSTKEY() eq "fred") ;
+ ok(118, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(119, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(120, $h{"fred"} eq "joe");
+ ok(121, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(122, $db->FIRSTKEY() eq "fred") ;
+ ok(123, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(124, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(125, $result{"store key"} eq "store key - 1: [fred]");
+ ok(126, $result{"store value"} eq "store value - 1: [joe]");
+ ok(127, ! defined $result{"fetch key"} );
+ ok(128, ! defined $result{"fetch value"} );
+ ok(129, $_ eq "original") ;
+
+ ok(130, $db->FIRSTKEY() eq "fred") ;
+ ok(131, $result{"store key"} eq "store key - 1: [fred]");
+ ok(132, $result{"store value"} eq "store value - 1: [joe]");
+ ok(133, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(134, ! defined $result{"fetch value"} );
+ ok(135, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(136, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(137, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(138, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(139, ! defined $result{"fetch value"} );
+ ok(140, $_ eq "original") ;
+
+ ok(141, $h{"fred"} eq "joe");
+ ok(142, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(143, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(144, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(145, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(146, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(147, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(148, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 1
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ my %h ;
+
+ sub Compare
+ {
+ my ($key1, $key2) = @_ ;
+ "\L$key1" cmp "\L$key2" ;
+ }
+
+ # specify the Perl sub that will do the comparison
+ $DB_BTREE->{'compare'} = \&Compare ;
+
+ unlink "tree" ;
+ tie %h, "DB_File", "tree", O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open file 'tree': $!\n" ;
+
+ # Add a key/value pair to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+ $h{'duck'} = 'donald' ;
+
+ # Delete
+ delete $h{"duck"} ;
+
+ # Cycle through the keys printing them in order.
+ # Note it is not necessary to sort the keys as
+ # the btree will have kept them in order automatically.
+ foreach (keys %h)
+ { print "$_\n" }
+
+ untie %h ;
+
+ unlink "tree" ;
+ }
+
+ delete $DB_BTREE->{'compare'} ;
+
+ ok(149, docat_del($file) eq <<'EOM') ;
+mouse
+Smith
+Wall
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 2
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename %h ) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the associative array
+ # and print each key/value pair.
+ foreach (keys %h)
+ { print "$_ -> $h{$_}\n" }
+
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(150, docat_del($file) eq ($db185mode ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Larry
+Wall -> Larry
+mouse -> mickey
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 3
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $status $key $value) ;
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'Wall'} = 'Larry' ;
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key
+ $h{'Wall'} = 'Brick' ; # Note the duplicate key and value
+ $h{'Smith'} = 'John' ;
+ $h{'mouse'} = 'mickey' ;
+
+ # iterate through the btree using seq
+ # and print each key/value pair.
+ $key = $value = 0 ;
+ for ($status = $x->seq($key, $value, R_FIRST) ;
+ $status == 0 ;
+ $status = $x->seq($key, $value, R_NEXT) )
+ { print "$key -> $value\n" }
+
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(151, docat_del($file) eq ($db185mode == 1 ? <<'EOM' : <<'EOM') ) ;
+Smith -> John
+Wall -> Brick
+Wall -> Brick
+Wall -> Larry
+mouse -> mickey
+EOM
+Smith -> John
+Wall -> Larry
+Wall -> Brick
+Wall -> Brick
+mouse -> mickey
+EOM
+
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 4
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h ) ;
+
+ $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ my $cnt = $x->get_dup("Wall") ;
+ print "Wall occurred $cnt times\n" ;
+
+ my %hash = $x->get_dup("Wall", 1) ;
+ print "Larry is there\n" if $hash{'Larry'} ;
+ print "There are $hash{'Brick'} Brick Walls\n" ;
+
+ my @list = sort $x->get_dup("Wall") ;
+ print "Wall => [@list]\n" ;
+
+ @list = $x->get_dup("Smith") ;
+ print "Smith => [@list]\n" ;
+
+ @list = $x->get_dup("Dog") ;
+ print "Dog => [@list]\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(152, docat_del($file) eq <<'EOM') ;
+Wall occurred 3 times
+Larry is there
+There are 2 Brick Walls
+Wall => [Brick Brick Larry]
+Smith => [John]
+Dog => []
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 5
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ $found = ( $x->find_dup("Wall", "Harry") == 0 ? "" : "not") ;
+ print "Harry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+ }
+
+ ok(153, docat_del($file) eq <<'EOM') ;
+Larry Wall is there
+Harry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 6
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+
+ use vars qw($filename $x %h $found) ;
+
+ my $filename = "tree" ;
+
+ # Enable duplicate records
+ $DB_BTREE->{'flags'} = R_DUP ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ $x->del_dup("Wall", "Larry") ;
+
+ $found = ( $x->find_dup("Wall", "Larry") == 0 ? "" : "not") ;
+ print "Larry Wall is $found there\n" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+ }
+
+ ok(154, docat_del($file) eq <<'EOM') ;
+Larry Wall is not there
+EOM
+
+ {
+ my $redirect = new Redirect $file ;
+
+ # BTREE example 7
+ ###
+
+ use warnings FATAL => qw(all) ;
+ use strict ;
+ use DB_File ;
+ use Fcntl ;
+
+ use vars qw($filename $x %h $st $key $value) ;
+
+ sub match
+ {
+ my $key = shift ;
+ my $value = 0;
+ my $orig_key = $key ;
+ $x->seq($key, $value, R_CURSOR) ;
+ print "$orig_key\t-> $key\t-> $value\n" ;
+ }
+
+ $filename = "tree" ;
+ unlink $filename ;
+
+ $x = tie %h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_BTREE
+ or die "Cannot open $filename: $!\n";
+
+ # Add some key/value pairs to the file
+ $h{'mouse'} = 'mickey' ;
+ $h{'Wall'} = 'Larry' ;
+ $h{'Walls'} = 'Brick' ;
+ $h{'Smith'} = 'John' ;
+
+
+ $key = $value = 0 ;
+ print "IN ORDER\n" ;
+ for ($st = $x->seq($key, $value, R_FIRST) ;
+ $st == 0 ;
+ $st = $x->seq($key, $value, R_NEXT) )
+
+ { print "$key -> $value\n" }
+
+ print "\nPARTIAL MATCH\n" ;
+
+ match "Wa" ;
+ match "A" ;
+ match "a" ;
+
+ undef $x ;
+ untie %h ;
+
+ unlink $filename ;
+
+ }
+
+ ok(155, docat_del($file) eq <<'EOM') ;
+IN ORDER
+Smith -> John
+Wall -> Larry
+Walls -> Brick
+mouse -> mickey
+
+PARTIAL MATCH
+Wa -> Wall -> Larry
+A -> Smith -> John
+a -> mouse -> mickey
+EOM
+
+}
+
+#{
+# # R_SETCURSOR
+# use strict ;
+# my (%h, $db) ;
+# unlink $Dfile;
+#
+# ok(156, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_BTREE ) );
+#
+# $h{abc} = 33 ;
+# my $k = "newest" ;
+# my $v = 44 ;
+# my $status = $db->put($k, $v, R_SETCURSOR) ;
+# print "status = [$status]\n" ;
+# ok(157, $status == 0) ;
+# $status = $db->del($k, R_CURSOR) ;
+# print "status = [$status]\n" ;
+# ok(158, $status == 0) ;
+# $k = "newest" ;
+# ok(159, $db->get($k, $v, R_CURSOR)) ;
+#
+# ok(160, keys %h == 1) ;
+#
+# undef $db ;
+# untie %h;
+# unlink $Dfile;
+#}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(156, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_BTREE
+ or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(157, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/db/perl/DB_File/t/db-hash.t b/db/perl/DB_File/t/db-hash.t
new file mode 100644
index 000000000..daf0ea81a
--- /dev/null
+++ b/db/perl/DB_File/t/db-hash.t
@@ -0,0 +1,756 @@
+#!./perl
+
+use warnings ;
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..111\n";
+ exit 0;
+ }
+ }
+}
+
+use strict;
+use warnings;
+use DB_File;
+use Fcntl;
+
+print "1..111\n";
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+my $Dfile = "dbhash.tmp";
+my $null_keys_allowed = ($DB_File::db_ver < 2.004010
+ || $DB_File::db_ver >= 3.1 );
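+# (For Berkeley DB 2.x and later, version.c in this patch formats
+# $DB_File::db_ver as "%d.%03d%03d", so e.g. release 2.4.10 becomes 2.004010.)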
+
+unlink $Dfile;
+
+umask(0);
+
+# Check the interface to HASHINFO
+
+my $dbh = new DB_File::HASHINFO ;
+
+ok(1, ! defined $dbh->{bsize}) ;
+ok(2, ! defined $dbh->{ffactor}) ;
+ok(3, ! defined $dbh->{nelem}) ;
+ok(4, ! defined $dbh->{cachesize}) ;
+ok(5, ! defined $dbh->{hash}) ;
+ok(6, ! defined $dbh->{lorder}) ;
+
+$dbh->{bsize} = 3000 ;
+ok(7, $dbh->{bsize} == 3000 );
+
+$dbh->{ffactor} = 9000 ;
+ok(8, $dbh->{ffactor} == 9000 );
+
+$dbh->{nelem} = 400 ;
+ok(9, $dbh->{nelem} == 400 );
+
+$dbh->{cachesize} = 65 ;
+ok(10, $dbh->{cachesize} == 65 );
+
+$dbh->{hash} = "abc" ;
+ok(11, $dbh->{hash} eq "abc" );
+
+$dbh->{lorder} = 1234 ;
+ok(12, $dbh->{lorder} == 1234 );
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(13, $@ =~ /^DB_File::HASHINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(14, $@ =~ /^DB_File::HASHINFO::FETCH - Unknown element 'fred' at/ );
+
+
+# Now check the interface to HASH
+my ($X, %h);
+ok(15, $X = tie(%h, 'DB_File',$Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(16, ($mode & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640) ||
+ $^O eq 'amigaos' || $^O eq 'MSWin32' || $^O eq 'NetWare');
+
+my ($key, $value, $i);
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(17, !$i );
+
+$h{'goner1'} = 'snork';
+
+$h{'abc'} = 'ABC';
+ok(18, $h{'abc'} eq 'ABC' );
+ok(19, !defined $h{'jimmy'} );
+ok(20, !exists $h{'jimmy'} );
+ok(21, exists $h{'abc'} );
+
+$h{'def'} = 'DEF';
+$h{'jkl','mno'} = "JKL\034MNO";
+$h{'a',2,3,4,5} = join("\034",'A',2,3,4,5);
+$h{'a'} = 'A';
+
+#$h{'b'} = 'B';
+$X->STORE('b', 'B') ;
+
+$h{'c'} = 'C';
+
+#$h{'d'} = 'D';
+$X->put('d', 'D') ;
+
+$h{'e'} = 'E';
+$h{'f'} = 'F';
+$h{'g'} = 'X';
+$h{'h'} = 'H';
+$h{'i'} = 'I';
+
+$h{'goner2'} = 'snork';
+delete $h{'goner2'};
+
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(%h);
+
+
+# tie to the same file again, do not supply a type - should default to HASH
+ok(22, $X = tie(%h,'DB_File',$Dfile, O_RDWR, 0640) );
+
+# Modify an entry from the previous tie
+$h{'g'} = 'G';
+
+$h{'j'} = 'J';
+$h{'k'} = 'K';
+$h{'l'} = 'L';
+$h{'m'} = 'M';
+$h{'n'} = 'N';
+$h{'o'} = 'O';
+$h{'p'} = 'P';
+$h{'q'} = 'Q';
+$h{'r'} = 'R';
+$h{'s'} = 'S';
+$h{'t'} = 'T';
+$h{'u'} = 'U';
+$h{'v'} = 'V';
+$h{'w'} = 'W';
+$h{'x'} = 'X';
+$h{'y'} = 'Y';
+$h{'z'} = 'Z';
+
+$h{'goner3'} = 'snork';
+
+delete $h{'goner1'};
+$X->DELETE('goner3');
+
+my @keys = keys(%h);
+my @values = values(%h);
+
+ok(23, $#keys == 29 && $#values == 29) ;
+
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ if ($key eq $keys[$i] && $value eq $values[$i] && $key eq lc($value)) {
+ $key =~ y/a-z/A-Z/;
+ $i++ if $key eq $value;
+ }
+}
+
+ok(24, $i == 30) ;
+
+@keys = ('blurfl', keys(%h), 'dyick');
+ok(25, $#keys == 31) ;
+
+$h{'foo'} = '';
+ok(26, $h{'foo'} eq '' );
+
+# Berkeley DB from version 2.4.10 to 3.0 does not allow null keys.
+# This feature was reenabled in version 3.1 of Berkeley DB.
+my $result = 0 ;
+if ($null_keys_allowed) {
+ $h{''} = 'bar';
+ $result = ( $h{''} eq 'bar' );
+}
+else
+ { $result = 1 }
+ok(27, $result) ;
+
+# check cache overflow and numeric keys and contents
+my $ok = 1;
+for ($i = 1; $i < 200; $i++) { $h{$i + 0} = $i + 0; }
+for ($i = 1; $i < 200; $i++) { $ok = 0 unless $h{$i} == $i; }
+ok(28, $ok );
+
+($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime,$ctime,
+ $blksize,$blocks) = stat($Dfile);
+ok(29, $size > 0 );
+
+@h{0..200} = 200..400;
+my @foo = @h{0..200};
+ok(30, join(':',200..400) eq join(':',@foo) );
+
+
+# Now check all the non-tie specific stuff
+
+# Check NOOVERWRITE will make put fail when attempting to overwrite
+# an existing record.
+
+my $status = $X->put( 'x', 'newvalue', R_NOOVERWRITE) ;
+ok(31, $status == 1 );
+
+# check that the value of the key 'x' has not been changed by the
+# previous test
+ok(32, $h{'x'} eq 'X' );
+
+# standard put
+$status = $X->put('key', 'value') ;
+ok(33, $status == 0 );
+
+# check that the previous put can be retrieved
+$value = 0 ;
+$status = $X->get('key', $value) ;
+ok(34, $status == 0 );
+ok(35, $value eq 'value' );
+
+# Attempting to delete an existing key should work
+
+$status = $X->del('q') ;
+ok(36, $status == 0 );
+
+# Make sure that the deleted key cannot be retrieved
+{
+ no warnings 'uninitialized' ;
+ ok(37, $h{'q'} eq undef );
+}
+
+# Attempting to delete a non-existent key should fail
+
+$status = $X->del('joe') ;
+ok(38, $status == 1 );
+
+# Check the get interface
+
+# First a non-existing key
+$status = $X->get('aaaa', $value) ;
+ok(39, $status == 1 );
+
+# Next an existing key
+$status = $X->get('a', $value) ;
+ok(40, $status == 0 );
+ok(41, $value eq 'A' );
+
+# seq
+# ###
+
+# ditto, but use put to replace the key/value pair.
+
+# use seq to walk backwards through a file - check that this gives the reverse order
+
+# check seq FIRST/LAST
+
+# sync
+# ####
+
+$status = $X->sync ;
+ok(42, $status == 0 );
+
+
+# fd
+# ##
+
+$status = $X->fd ;
+ok(43, $status != 0 );
+
+undef $X ;
+untie %h ;
+
+unlink $Dfile;
+
+# clear
+# #####
+
+ok(44, tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+foreach (1 .. 10)
+ { $h{$_} = $_ * 100 }
+
+# check that there are 10 elements in the hash
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(45, $i == 10);
+
+# now clear the hash
+%h = () ;
+
+# check it is empty
+$i = 0 ;
+while (($key,$value) = each(%h)) {
+ $i++;
+}
+ok(46, $i == 0);
+
+untie %h ;
+unlink $Dfile ;
+
+
+# Now try an in memory file
+ok(47, $X = tie(%h, 'DB_File',undef, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+# fd on an in-memory file should fail
+$status = $X->fd ;
+ok(48, $status == -1 );
+
+undef $X ;
+untie %h ;
+
+{
+ # check ability to override the default hashing
+ my %x ;
+ my $filename = "xyz" ;
+ my $hi = new DB_File::HASHINFO ;
+ $::count = 0 ;
+ $hi->{hash} = sub { ++$::count ; length $_[0] } ;
+ ok(49, tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $hi ) ;
+ $h{"abc"} = 123 ;
+ ok(50, $h{"abc"} == 123) ;
+ untie %x ;
+ unlink $filename ;
+ ok(51, $::count >0) ;
+}
+
+{
+ # check that attempting to tie an array to a DB_HASH will fail
+
+ my $filename = "xyz" ;
+ my @x ;
+ eval { tie @x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_HASH ; } ;
+ ok(52, $@ =~ /^DB_File can only tie an associative array to a DB_HASH database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(53, $@ eq "") ;
+ my %h ;
+ my $X ;
+ eval '
+ $X = tie(%h, "SubDB","dbhash.tmp", O_RDWR|O_CREAT, 0640, $DB_HASH );
+ ' ;
+
+ main::ok(54, $@ eq "") ;
+
+ my $ret = eval '$h{"fred"} = 3 ; return $h{"fred"} ' ;
+ main::ok(55, $@ eq "") ;
+ main::ok(56, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put("joe", 4) ; $X->get("joe", $value) ; return $value' ;
+ main::ok(57, $@ eq "") ;
+ main::ok(58, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(59, $@ eq "" ) ;
+ main::ok(60, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method("joe") ' ;
+ main::ok(61, $@ eq "") ;
+ main::ok(62, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(%h);
+ unlink "SubDB.pm", "dbhash.tmp" ;
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(63, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
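+    # (Each filter callback is handed the key or value in $_ and may modify
+    # it in place; the filter tests below depend on that behaviour.)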
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ # fk sk fv sv
+ ok(64, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(65, $h{"fred"} eq "joe");
+ # fk sk fv sv
+ ok(66, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(67, $db->FIRSTKEY() eq "fred") ;
+ # fk sk fv sv
+ ok(68, checkOutput( "fred", "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { $_ = uc $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ = lc $_ ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"Fred"} = "Joe" ;
+ # fk sk fv sv
+ ok(69, checkOutput( "", "fred", "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(70, $h{"Fred"} eq "[Jxe]");
+ # fk sk fv sv
+ ok(71, checkOutput( "", "fred", "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(72, $db->FIRSTKEY() eq "FRED") ;
+ # fk sk fv sv
+ ok(73, checkOutput( "FRED", "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(74, checkOutput( "", "fred", "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(75, $h{"fred"} eq "joe");
+ ok(76, checkOutput( "", "fred", "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(77, $db->FIRSTKEY() eq "fred") ;
+ ok(78, checkOutput( "fred", "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h{"fred"} = "joe" ;
+ ok(79, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(80, $h{"fred"} eq "joe");
+ ok(81, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(82, $db->FIRSTKEY() eq "fred") ;
+ ok(83, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+
+ unlink $Dfile;
+ ok(84, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h{"fred"} = "joe" ;
+ ok(85, $result{"store key"} eq "store key - 1: [fred]");
+ ok(86, $result{"store value"} eq "store value - 1: [joe]");
+ ok(87, ! defined $result{"fetch key"} );
+ ok(88, ! defined $result{"fetch value"} );
+ ok(89, $_ eq "original") ;
+
+ ok(90, $db->FIRSTKEY() eq "fred") ;
+ ok(91, $result{"store key"} eq "store key - 1: [fred]");
+ ok(92, $result{"store value"} eq "store value - 1: [joe]");
+ ok(93, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(94, ! defined $result{"fetch value"} );
+ ok(95, $_ eq "original") ;
+
+ $h{"jim"} = "john" ;
+ ok(96, $result{"store key"} eq "store key - 2: [fred jim]");
+ ok(97, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(98, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(99, ! defined $result{"fetch value"} );
+ ok(100, $_ eq "original") ;
+
+ ok(101, $h{"fred"} eq "joe");
+ ok(102, $result{"store key"} eq "store key - 3: [fred jim fred]");
+ ok(103, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(104, $result{"fetch key"} eq "fetch key - 1: [fred]");
+ ok(105, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(106, $_ eq "original") ;
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (%h, $db) ;
+ unlink $Dfile;
+
+ ok(107, $db = tie(%h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_HASH ) );
+
+ $db->filter_store_key (sub { $_ = $h{$_} }) ;
+
+ eval '$h{1} = 1234' ;
+ ok(108, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie %h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+ use vars qw( %h $k $v ) ;
+
+ unlink "fruit" ;
+ tie %h, "DB_File", "fruit", O_RDWR|O_CREAT, 0640, $DB_HASH
+ or die "Cannot open file 'fruit': $!\n";
+
+ # Add a few key/value pairs to the file
+ $h{"apple"} = "red" ;
+ $h{"orange"} = "orange" ;
+ $h{"banana"} = "yellow" ;
+ $h{"tomato"} = "red" ;
+
+ # Check for existence of a key
+ print "Banana Exists\n\n" if $h{"banana"} ;
+
+ # Delete a key/value pair.
+ delete $h{"apple"} ;
+
+ # print the contents of the file
+ while (($k, $v) = each %h)
+ { print "$k -> $v\n" }
+
+ untie %h ;
+
+ unlink "fruit" ;
+ }
+
+ ok(109, docat_del($file) eq <<'EOM') ;
+Banana Exists
+
+orange -> orange
+tomato -> red
+banana -> yellow
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+ # test that $hash{KEY} = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+ $h{ABC} = undef;
+ ok(110, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+{
+ # test that %hash = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my %h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie %h, 'DB_File', $Dfile or die "Can't open file: $!\n" ;
+    %h = () ;
+ ok(111, $a eq "") ;
+ untie %h ;
+ unlink $Dfile;
+}
+
+exit ;
diff --git a/db/perl/DB_File/t/db-recno.t b/db/perl/DB_File/t/db-recno.t
new file mode 100644
index 000000000..f02991560
--- /dev/null
+++ b/db/perl/DB_File/t/db-recno.t
@@ -0,0 +1,1254 @@
+#!./perl -w
+
+use warnings;
+use strict ;
+
+BEGIN {
+ unless(grep /blib/, @INC) {
+ chdir 't' if -d 't';
+ @INC = '../lib' if -d '../lib';
+ }
+}
+
+use Config;
+
+BEGIN {
+ if(-d "lib" && -f "TEST") {
+ if ($Config{'extensions'} !~ /\bDB_File\b/ ) {
+ print "1..138\n";
+ exit 0;
+ }
+ }
+}
+
+use DB_File;
+use Fcntl;
+use vars qw($dbh $Dfile $bad_ones $FA) ;
+
+# full tied array support started in Perl 5.004_57
+# Double check to see if it is available.
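+# (Assigning the tied array to a scalar below calls FETCHSIZE, which sets
+# $FA to 1 only when full tied-array support is present.)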
+
+{
+ sub try::TIEARRAY { bless [], "try" }
+ sub try::FETCHSIZE { $FA = 1 }
+ $FA = 0 ;
+ my @a ;
+ tie @a, 'try' ;
+ my $a = @a ;
+}
+
+
+sub ok
+{
+ my $no = shift ;
+ my $result = shift ;
+
+ print "not " unless $result ;
+ print "ok $no\n" ;
+
+ return $result ;
+}
+
+{
+ package Redirect ;
+ use Symbol ;
+
+ sub new
+ {
+ my $class = shift ;
+ my $filename = shift ;
+ my $fh = gensym ;
+ open ($fh, ">$filename") || die "Cannot open $filename: $!" ;
+ my $real_stdout = select($fh) ;
+ return bless [$fh, $real_stdout ] ;
+
+ }
+ sub DESTROY
+ {
+ my $self = shift ;
+ close $self->[0] ;
+ select($self->[1]) ;
+ }
+}
+
+sub docat
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file:$!";
+ my $result = <CAT>;
+ close(CAT);
+ return $result;
+}
+
+sub docat_del
+{
+ my $file = shift;
+ local $/ = undef;
+ open(CAT,$file) || die "Cannot open $file: $!";
+ my $result = <CAT>;
+ close(CAT);
+ unlink $file ;
+ return $result;
+}
+
+sub bad_one
+{
+ print STDERR <<EOM unless $bad_ones++ ;
+#
+# Some older versions of Berkeley DB version 1 will fail tests 51,
+# 53 and 55.
+#
+# You can safely ignore the errors if you're never going to use the
+# broken functionality (recno databases with a modified bval).
+# Otherwise you'll have to upgrade your DB library.
+#
+# If you want to use Berkeley DB version 1, then 1.85 and 1.86 are the
+# last versions that were released. Berkeley DB version 2 is continually
+# being updated -- Check out http://www.sleepycat.com/ for more details.
+#
+EOM
+}
+
+my $splice_tests = 10 + 1; # ten regressions, plus the randoms
+my $total_tests = 138 ;
+$total_tests += $splice_tests if $FA ;
+print "1..$total_tests\n";
+
+my $Dfile = "recno.tmp";
+unlink $Dfile ;
+
+umask(0);
+
+# Check the interface to RECNOINFO
+
+my $dbh = new DB_File::RECNOINFO ;
+ok(1, ! defined $dbh->{bval}) ;
+ok(2, ! defined $dbh->{cachesize}) ;
+ok(3, ! defined $dbh->{psize}) ;
+ok(4, ! defined $dbh->{flags}) ;
+ok(5, ! defined $dbh->{lorder}) ;
+ok(6, ! defined $dbh->{reclen}) ;
+ok(7, ! defined $dbh->{bfname}) ;
+
+$dbh->{bval} = 3000 ;
+ok(8, $dbh->{bval} == 3000 );
+
+$dbh->{cachesize} = 9000 ;
+ok(9, $dbh->{cachesize} == 9000 );
+
+$dbh->{psize} = 400 ;
+ok(10, $dbh->{psize} == 400 );
+
+$dbh->{flags} = 65 ;
+ok(11, $dbh->{flags} == 65 );
+
+$dbh->{lorder} = 123 ;
+ok(12, $dbh->{lorder} == 123 );
+
+$dbh->{reclen} = 1234 ;
+ok(13, $dbh->{reclen} == 1234 );
+
+$dbh->{bfname} = 1234 ;
+ok(14, $dbh->{bfname} == 1234 );
+
+
+# Check that an invalid entry is caught both for store & fetch
+eval '$dbh->{fred} = 1234' ;
+ok(15, $@ =~ /^DB_File::RECNOINFO::STORE - Unknown element 'fred' at/ );
+eval 'my $q = $dbh->{fred}' ;
+ok(16, $@ =~ /^DB_File::RECNOINFO::FETCH - Unknown element 'fred' at/ );
+
+# Now check the interface to RECNOINFO
+
+my $X ;
+my @h ;
+ok(17, $X = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+
+ok(18, ((stat($Dfile))[2] & 0777) == (($^O eq 'os2' || $^O eq 'MacOS') ? 0666 : 0640)
+ || $^O eq 'MSWin32' || $^O eq 'NetWare' || $^O eq 'amigaos') ;
+
+#my $l = @h ;
+my $l = $X->length ;
+ok(19, ($FA ? @h == 0 : !$l) );
+
+my @data = qw( a b c d ever f g h i j k longername m n o p) ;
+
+$h[0] = shift @data ;
+ok(20, $h[0] eq 'a' );
+
+my $i;
+foreach (@data)
+ { $h[++$i] = $_ }
+
+unshift (@data, 'a') ;
+
+ok(21, defined $h[1] );
+ok(22, ! defined $h[16] );
+ok(23, $FA ? @h == @data : $X->length == @data );
+
+
+# Overwrite an entry & check that we can fetch it back
+$h[3] = 'replaced' ;
+$data[3] = 'replaced' ;
+ok(24, $h[3] eq 'replaced' );
+
+#PUSH
+my @push_data = qw(added to the end) ;
+($FA ? push(@h, @push_data) : $X->push(@push_data)) ;
+push (@data, @push_data) ;
+ok(25, $h[++$i] eq 'added' );
+ok(26, $h[++$i] eq 'to' );
+ok(27, $h[++$i] eq 'the' );
+ok(28, $h[++$i] eq 'end' );
+
+# POP
+my $popped = pop (@data) ;
+my $value = ($FA ? pop @h : $X->pop) ;
+ok(29, $value eq $popped) ;
+
+# SHIFT
+$value = ($FA ? shift @h : $X->shift) ;
+my $shifted = shift @data ;
+ok(30, $value eq $shifted );
+
+# UNSHIFT
+
+# empty list
+($FA ? unshift @h,() : $X->unshift) ;
+ok(31, ($FA ? @h == @data : $X->length == @data ));
+
+my @new_data = qw(add this to the start of the array) ;
+$FA ? unshift (@h, @new_data) : $X->unshift (@new_data) ;
+unshift (@data, @new_data) ;
+ok(32, $FA ? @h == @data : $X->length == @data );
+ok(33, $h[0] eq "add") ;
+ok(34, $h[1] eq "this") ;
+ok(35, $h[2] eq "to") ;
+ok(36, $h[3] eq "the") ;
+ok(37, $h[4] eq "start") ;
+ok(38, $h[5] eq "of") ;
+ok(39, $h[6] eq "the") ;
+ok(40, $h[7] eq "array") ;
+ok(41, $h[8] eq $data[8]) ;
+
+# Brief test for SPLICE - more thorough 'soak test' is later.
+my @old;
+if ($FA) {
+ @old = splice(@h, 1, 2, qw(bananas just before));
+}
+else {
+ @old = $X->splice(1, 2, qw(bananas just before));
+}
+ok(42, $h[0] eq "add") ;
+ok(43, $h[1] eq "bananas") ;
+ok(44, $h[2] eq "just") ;
+ok(45, $h[3] eq "before") ;
+ok(46, $h[4] eq "the") ;
+ok(47, $h[5] eq "start") ;
+ok(48, $h[6] eq "of") ;
+ok(49, $h[7] eq "the") ;
+ok(50, $h[8] eq "array") ;
+ok(51, $h[9] eq $data[8]) ;
+$FA ? splice(@h, 1, 3, @old) : $X->splice(1, 3, @old);
+
+# Now both arrays should be identical
+
+my $ok = 1 ;
+my $j = 0 ;
+foreach (@data)
+{
+ $ok = 0, last if $_ ne $h[$j ++] ;
+}
+ok(52, $ok );
+
+# Negative subscripts
+
+# get the last element of the array
+ok(53, $h[-1] eq $data[-1] );
+ok(54, $h[-1] eq $h[ ($FA ? @h : $X->length) -1] );
+
+# get the first element using a negative subscript
+eval '$h[ - ( $FA ? @h : $X->length)] = "abcd"' ;
+ok(55, $@ eq "" );
+ok(56, $h[0] eq "abcd" );
+
+# now try to read before the start of the array
+eval '$h[ - (1 + ($FA ? @h : $X->length))] = 1234' ;
+ok(57, $@ =~ '^Modification of non-creatable array value attempted' );
+
+# IMPORTANT - $X must be undefined before the untie otherwise the
+# underlying DB close routine will not get called.
+undef $X ;
+untie(@h);
+
+unlink $Dfile;
+
+
+{
+ # Check bval defaults to \n
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ ok(58, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ ok(59, $x eq "abc\ndef\n\nghi\n") ;
+}
+
+{
+ # Change bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{bval} = "-" ;
+ ok(60, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc-def--ghi-") ;
+ bad_one() unless $ok ;
+ ok(61, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with default bval (space)
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{reclen} = 5 ;
+ ok(62, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc def ghi ") ;
+ bad_one() unless $ok ;
+ ok(63, $ok) ;
+}
+
+{
+ # Check R_FIXEDLEN with user-defined bval
+
+ my @h = () ;
+ my $dbh = new DB_File::RECNOINFO ;
+ $dbh->{flags} = R_FIXEDLEN ;
+ $dbh->{bval} = "-" ;
+ $dbh->{reclen} = 5 ;
+ ok(64, tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $dbh ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[3] = "ghi" ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ unlink $Dfile;
+ my $ok = ($x eq "abc--def-------ghi--") ;
+ bad_one() unless $ok ;
+ ok(65, $ok) ;
+}
+
+{
+ # check that attempting to tie an associative array to a DB_RECNO will fail
+
+ my $filename = "xyz" ;
+ my %x ;
+ eval { tie %x, 'DB_File', $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO ; } ;
+ ok(66, $@ =~ /^DB_File can only tie an array to a DB_RECNO database/) ;
+ unlink $filename ;
+}
+
+{
+ # sub-class test
+
+ package Another ;
+
+ use warnings ;
+ use strict ;
+
+ open(FILE, ">SubDB.pm") or die "Cannot open SubDB.pm: $!\n" ;
+ print FILE <<'EOM' ;
+
+ package SubDB ;
+
+ use warnings ;
+ use strict ;
+ use vars qw( @ISA @EXPORT) ;
+
+ require Exporter ;
+ use DB_File;
+ @ISA=qw(DB_File);
+ @EXPORT = @DB_File::EXPORT ;
+
+ sub STORE {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::STORE($key, $value * 2) ;
+ }
+
+ sub FETCH {
+ my $self = shift ;
+ my $key = shift ;
+ $self->SUPER::FETCH($key) - 1 ;
+ }
+
+ sub put {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = shift ;
+ $self->SUPER::put($key, $value * 3) ;
+ }
+
+ sub get {
+ my $self = shift ;
+ $self->SUPER::get($_[0], $_[1]) ;
+ $_[1] -= 2 ;
+ }
+
+ sub A_new_method
+ {
+ my $self = shift ;
+ my $key = shift ;
+ my $value = $self->FETCH($key) ;
+ return "[[$value]]" ;
+ }
+
+ 1 ;
+EOM
+
+ close FILE ;
+
+ BEGIN { push @INC, '.'; }
+ eval 'use SubDB ; ';
+ main::ok(67, $@ eq "") ;
+ my @h ;
+ my $X ;
+ eval '
+ $X = tie(@h, "SubDB","recno.tmp", O_RDWR|O_CREAT, 0640, $DB_RECNO );
+ ' ;
+
+ main::ok(68, $@ eq "") ;
+
+ my $ret = eval '$h[3] = 3 ; return $h[3] ' ;
+ main::ok(69, $@ eq "") ;
+ main::ok(70, $ret == 5) ;
+
+ my $value = 0;
+ $ret = eval '$X->put(1, 4) ; $X->get(1, $value) ; return $value' ;
+ main::ok(71, $@ eq "") ;
+ main::ok(72, $ret == 10) ;
+
+ $ret = eval ' R_NEXT eq main::R_NEXT ' ;
+ main::ok(73, $@ eq "" ) ;
+ main::ok(74, $ret == 1) ;
+
+ $ret = eval '$X->A_new_method(1) ' ;
+ main::ok(75, $@ eq "") ;
+ main::ok(76, $ret eq "[[11]]") ;
+
+ undef $X;
+ untie(@h);
+ unlink "SubDB.pm", "recno.tmp" ;
+
+}
+
+{
+
+ # test $#
+ my $self ;
+ unlink $Dfile;
+ ok(77, $self = tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) ;
+ $h[0] = "abc" ;
+ $h[1] = "def" ;
+ $h[2] = "ghi" ;
+ $h[3] = "jkl" ;
+ ok(78, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ untie @h ;
+ my $x = docat($Dfile) ;
+ ok(79, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to same length
+ ok(80, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 3 }
+ else
+ { $self->STORESIZE(4) }
+ ok(81, $FA ? $#h == 3 : $self->length() == 4) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(82, $x eq "abc\ndef\nghi\njkl\n") ;
+
+ # $# sets array to bigger
+ ok(83, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 6 }
+ else
+ { $self->STORESIZE(7) }
+ ok(84, $FA ? $#h == 6 : $self->length() == 7) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(85, $x eq "abc\ndef\nghi\njkl\n\n\n\n") ;
+
+ # $# sets array smaller
+ ok(86, $self = tie @h, 'DB_File', $Dfile, O_RDWR, 0640, $DB_RECNO ) ;
+ if ($FA)
+ { $#h = 2 }
+ else
+ { $self->STORESIZE(3) }
+ ok(87, $FA ? $#h == 2 : $self->length() == 3) ;
+ undef $self ;
+ untie @h ;
+ $x = docat($Dfile) ;
+ ok(88, $x eq "abc\ndef\nghi\n") ;
+
+ unlink $Dfile;
+
+
+}
+
+{
+ # DBM Filter tests
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ my ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ unlink $Dfile;
+
+ sub checkOutput
+ {
+ my($fk, $sk, $fv, $sv) = @_ ;
+ return
+ $fetch_key eq $fk && $store_key eq $sk &&
+ $fetch_value eq $fv && $store_value eq $sv &&
+ $_ eq 'original' ;
+ }
+
+ ok(89, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_fetch_key (sub { $fetch_key = $_ }) ;
+ $db->filter_store_key (sub { $store_key = $_ }) ;
+ $db->filter_fetch_value (sub { $fetch_value = $_}) ;
+ $db->filter_store_value (sub { $store_value = $_ }) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ # fk sk fv sv
+ ok(90, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(91, $h[0] eq "joe");
+ # fk sk fv sv
+ ok(92, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(93, $db->FIRSTKEY() == 0) ;
+ # fk sk fv sv
+ ok(94, checkOutput( 0, "", "", "")) ;
+
+ # replace the filters, but remember the previous set
+ my ($old_fk) = $db->filter_fetch_key
+ (sub { ++ $_ ; $fetch_key = $_ }) ;
+ my ($old_sk) = $db->filter_store_key
+ (sub { $_ *= 2 ; $store_key = $_ }) ;
+ my ($old_fv) = $db->filter_fetch_value
+ (sub { $_ = "[$_]"; $fetch_value = $_ }) ;
+ my ($old_sv) = $db->filter_store_value
+ (sub { s/o/x/g; $store_value = $_ }) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[1] = "Joe" ;
+ # fk sk fv sv
+ ok(95, checkOutput( "", 2, "", "Jxe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(96, $h[1] eq "[Jxe]");
+ # fk sk fv sv
+ ok(97, checkOutput( "", 2, "[Jxe]", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(98, $db->FIRSTKEY() == 1) ;
+ # fk sk fv sv
+ ok(99, checkOutput( 1, "", "", "")) ;
+
+ # put the original filters back
+ $db->filter_fetch_key ($old_fk);
+ $db->filter_store_key ($old_sk);
+ $db->filter_fetch_value ($old_fv);
+ $db->filter_store_value ($old_sv);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(100, checkOutput( "", 0, "", "joe")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(101, $h[0] eq "joe");
+ ok(102, checkOutput( "", 0, "joe", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(103, $db->FIRSTKEY() == 0) ;
+ ok(104, checkOutput( 0, "", "", "")) ;
+
+ # delete the filters
+ $db->filter_fetch_key (undef);
+ $db->filter_store_key (undef);
+ $db->filter_fetch_value (undef);
+ $db->filter_store_value (undef);
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ $h[0] = "joe" ;
+ ok(105, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(106, $h[0] eq "joe");
+ ok(107, checkOutput( "", "", "", "")) ;
+
+ ($fetch_key, $store_key, $fetch_value, $store_value) = ("") x 4 ;
+ ok(108, $db->FIRSTKEY() == 0) ;
+ ok(109, checkOutput( "", "", "", "")) ;
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter with a closure
+
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+
+ unlink $Dfile;
+ ok(110, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ my %result = () ;
+
+ sub Closure
+ {
+ my ($name) = @_ ;
+ my $count = 0 ;
+ my @kept = () ;
+
+ return sub { ++$count ;
+ push @kept, $_ ;
+ $result{$name} = "$name - $count: [@kept]" ;
+ }
+ }
+
+ $db->filter_store_key(Closure("store key")) ;
+ $db->filter_store_value(Closure("store value")) ;
+ $db->filter_fetch_key(Closure("fetch key")) ;
+ $db->filter_fetch_value(Closure("fetch value")) ;
+
+ $_ = "original" ;
+
+ $h[0] = "joe" ;
+ ok(111, $result{"store key"} eq "store key - 1: [0]");
+ ok(112, $result{"store value"} eq "store value - 1: [joe]");
+ ok(113, ! defined $result{"fetch key"} );
+ ok(114, ! defined $result{"fetch value"} );
+ ok(115, $_ eq "original") ;
+
+ ok(116, $db->FIRSTKEY() == 0 ) ;
+ ok(117, $result{"store key"} eq "store key - 1: [0]");
+ ok(118, $result{"store value"} eq "store value - 1: [joe]");
+ ok(119, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(120, ! defined $result{"fetch value"} );
+ ok(121, $_ eq "original") ;
+
+ $h[7] = "john" ;
+ ok(122, $result{"store key"} eq "store key - 2: [0 7]");
+ ok(123, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(124, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(125, ! defined $result{"fetch value"} );
+ ok(126, $_ eq "original") ;
+
+ ok(127, $h[0] eq "joe");
+ ok(128, $result{"store key"} eq "store key - 3: [0 7 0]");
+ ok(129, $result{"store value"} eq "store value - 2: [joe john]");
+ ok(130, $result{"fetch key"} eq "fetch key - 1: [0]");
+ ok(131, $result{"fetch value"} eq "fetch value - 1: [joe]");
+ ok(132, $_ eq "original") ;
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+{
+ # DBM Filter recursion detection
+ use warnings ;
+ use strict ;
+ my (@h, $db) ;
+ unlink $Dfile;
+
+ ok(133, $db = tie(@h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0640, $DB_RECNO ) );
+
+ $db->filter_store_key (sub { $_ = $h[0] }) ;
+
+ eval '$h[1] = 1234' ;
+ ok(134, $@ =~ /^recursion detected in filter_store_key at/ );
+
+ undef $db ;
+ untie @h;
+ unlink $Dfile;
+}
+
+
+{
+ # Examples from the POD
+
+ my $file = "xyzt" ;
+ {
+ my $redirect = new Redirect $file ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use DB_File ;
+
+ my $filename = "text" ;
+ unlink $filename ;
+
+ my @h ;
+ my $x = tie @h, "DB_File", $filename, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file 'text': $!\n" ;
+
+ # Add a few key/value pairs to the file
+ $h[0] = "orange" ;
+ $h[1] = "blue" ;
+ $h[2] = "yellow" ;
+
+ $FA ? push @h, "green", "black"
+ : $x->push("green", "black") ;
+
+ my $elements = $FA ? scalar @h : $x->length ;
+ print "The array contains $elements entries\n" ;
+
+ my $last = $FA ? pop @h : $x->pop ;
+ print "popped $last\n" ;
+
+ $FA ? unshift @h, "white"
+ : $x->unshift("white") ;
+ my $first = $FA ? shift @h : $x->shift ;
+ print "shifted $first\n" ;
+
+ # Check for existence of a key
+ print "Element 1 Exists with value $h[1]\n" if $h[1] ;
+
+ # use a negative index
+ print "The last element is $h[-1]\n" ;
+ print "The 2nd last element is $h[-2]\n" ;
+
+ undef $x ;
+ untie @h ;
+
+ unlink $filename ;
+ }
+
+ ok(135, docat_del($file) eq <<'EOM') ;
+The array contains 5 entries
+popped black
+shifted white
+Element 1 Exists with value blue
+The last element is green
+The 2nd last element is yellow
+EOM
+
+ my $save_output = "xyzt" ;
+ {
+ my $redirect = new Redirect $save_output ;
+
+ use warnings FATAL => qw(all);
+ use strict ;
+ use vars qw(@h $H $file $i) ;
+ use DB_File ;
+ use Fcntl ;
+
+ $file = "text" ;
+
+ unlink $file ;
+
+ $H = tie @h, "DB_File", $file, O_RDWR|O_CREAT, 0640, $DB_RECNO
+ or die "Cannot open file $file: $!\n" ;
+
+ # first create a text file to play with
+ $h[0] = "zero" ;
+ $h[1] = "one" ;
+ $h[2] = "two" ;
+ $h[3] = "three" ;
+ $h[4] = "four" ;
+
+
+ # Print the records in order.
+ #
+ # The length method is needed here because evaluating a tied
+ # array in a scalar context does not return the number of
+ # elements in the array.
+
+ print "\nORIGINAL\n" ;
+ foreach $i (0 .. $H->length - 1) {
+ print "$i: $h[$i]\n" ;
+ }
+
+ # use the push & pop methods
+ $a = $H->pop ;
+ $H->push("last") ;
+ print "\nThe last record was [$a]\n" ;
+
+ # and the shift & unshift methods
+ $a = $H->shift ;
+ $H->unshift("first") ;
+ print "The first record was [$a]\n" ;
+
+ # Use the API to add a new record after record 2.
+ $i = 2 ;
+ $H->put($i, "Newbie", R_IAFTER) ;
+
+ # and a new record before record 1.
+ $i = 1 ;
+ $H->put($i, "New One", R_IBEFORE) ;
+
+ # delete record 3
+ $H->del(3) ;
+
+ # now print the records in reverse order
+ print "\nREVERSE\n" ;
+ for ($i = $H->length - 1 ; $i >= 0 ; -- $i)
+ { print "$i: $h[$i]\n" }
+
+ # same again, but use the API functions instead
+ print "\nREVERSE again\n" ;
+ my ($s, $k, $v) = (0, 0, 0) ;
+ for ($s = $H->seq($k, $v, R_LAST) ;
+ $s == 0 ;
+ $s = $H->seq($k, $v, R_PREV))
+ { print "$k: $v\n" }
+
+ undef $H ;
+ untie @h ;
+
+ unlink $file ;
+ }
+
+ ok(136, docat_del($save_output) eq <<'EOM') ;
+
+ORIGINAL
+0: zero
+1: one
+2: two
+3: three
+4: four
+
+The last record was [four]
+The first record was [zero]
+
+REVERSE
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+
+REVERSE again
+5: last
+4: three
+3: Newbie
+2: one
+1: New One
+0: first
+EOM
+
+}
+
+{
+ # Bug ID 20001013.009
+ #
+    # test that $array[0] = undef doesn't produce the warning
+ # Use of uninitialized value in null operation
+ use warnings ;
+ use strict ;
+ use DB_File ;
+
+ unlink $Dfile;
+ my @h ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+ $h[0] = undef;
+ ok(137, $a eq "") ;
+ untie @h ;
+ unlink $Dfile;
+}
+
+{
+    # test that @array = () doesn't produce the warning
+ # Argument "" isn't numeric in entersub
+ use warnings ;
+ use strict ;
+ use DB_File ;
+ my $a = "";
+ local $SIG{__WARN__} = sub {$a = $_[0]} ;
+
+ unlink $Dfile;
+ my @h ;
+
+ tie @h, 'DB_File', $Dfile, O_RDWR|O_CREAT, 0664, $DB_RECNO
+ or die "Can't open file: $!\n" ;
+    @h = () ;
+ ok(138, $a eq "") ;
+ untie @h ;
+ unlink $Dfile;
+}
+
+# Only test splice if this is a newish version of Perl
+exit unless $FA ;
+
+# Test SPLICE
+#
+# These are a few regression tests: bundles of five arguments to pass
+# to test_splice(). The first four arguments correspond to those
+# given to splice(), and the last says which context to call it in
+# (scalar, list or void).
+#
+# The expected result is not needed because we get that by running
+# Perl's built-in splice().
+#
+my @tests = ([ [ 'falsely', 'dinosaur', 'remedy', 'commotion',
+ 'rarely', 'paleness' ],
+ -4, -2,
+ [ 'redoubled', 'Taylorize', 'Zoe', 'halogen' ],
+ 'void' ],
+
+ [ [ 'a' ], -2, 1, [ 'B' ], 'void' ],
+
+ [ [ 'Hartley', 'Islandia', 'assents', 'wishful' ],
+ 0, -4,
+ [ 'maids' ],
+ 'void' ],
+
+ [ [ 'visibility', 'pocketful', 'rectangles' ],
+ -10, 0,
+ [ 'garbages' ],
+ 'void' ],
+
+ [ [ 'sleeplessly' ],
+ 8, -4,
+ [ 'Margery', 'clearing', 'repercussion', 'clubs',
+ 'arise' ],
+ 'void' ],
+
+ [ [ 'chastises', 'recalculates' ],
+ 0, 0,
+ [ 'momentariness', 'mediates', 'accents', 'toils',
+ 'regaled' ],
+ 'void' ],
+
+ [ [ 'b', '' ],
+ 9, 8,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'b', '' ],
+ undef, undef,
+ [ 'otrb', 'stje', 'ixrpw', 'vxfx', 'lhhf' ],
+ 'scalar' ],
+
+ [ [ 'riheb' ], -8, undef, [], 'void' ],
+
+ [ [ 'uft', 'qnxs', '' ],
+ 6, -2,
+ [ 'znp', 'mhnkh', 'bn' ],
+ 'void' ],
+ );
+
+my $testnum = 139;
+my $failed = 0;
+require POSIX; my $tmp = POSIX::tmpnam();
+foreach my $test (@tests) {
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ require Data::Dumper;
+ print STDERR "failed: ", Data::Dumper::Dumper($test);
+ print STDERR "error: $err\n";
+ $failed = 1;
+ ok($testnum++, 0);
+ }
+ else { ok($testnum++, 1) }
+}
+
+if ($failed) {
+ # Not worth running the random ones
+ print STDERR 'skipping ', $testnum++, "\n";
+}
+else {
+ # A thousand randomly-generated tests
+ $failed = 0;
+ srand(0);
+ foreach (0 .. 1000 - 1) {
+ my $test = rand_test();
+ my $err = test_splice(@$test);
+ if (defined $err) {
+ require Data::Dumper;
+ print STDERR "failed: ", Data::Dumper::Dumper($test);
+ print STDERR "error: $err\n";
+ $failed = 1;
+ print STDERR "skipping any remaining random tests\n";
+ last;
+ }
+ }
+
+ ok($testnum++, not $failed);
+}
+
+die if $testnum != $total_tests + 1;
+
+exit ;
+
+# Subroutines for SPLICE testing
+
+# test_splice()
+#
+# Test the new splice() against Perl's built-in one. The first four
+# parameters are those passed to splice(), except that the lists must
+# be (explicitly) passed by reference, and are not actually modified.
+# (It's just a test!) The last argument specifies the context in
+# which to call the functions: 'list', 'scalar', or 'void'.
+#
+# Returns:
+# undef, if the two splices give the same results for the given
+# arguments and context;
+#
+# an error message showing the difference, otherwise.
+#
+# Reads global variable $tmp.
+#
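+# For example (illustrative only, not part of the tests that are run):
+#
+#     my $err = test_splice([qw(a b c d)], 1, 2, ['X'], 'list');
+#     die $err if defined $err;
+#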
+sub test_splice {
+ die 'usage: test_splice(array, offset, length, list, context)' if @_ != 5;
+ my ($array, $offset, $length, $list, $context) = @_;
+ my @array = @$array;
+ my @list = @$list;
+
+ open(TEXT, ">$tmp") or die "cannot write to $tmp: $!";
+ foreach (@array) { print TEXT "$_\n" }
+ close TEXT or die "cannot close $tmp: $!";
+
+ my @h;
+ my $H = tie @h, 'DB_File', $tmp, O_RDWR, 0644, $DB_RECNO
+ or die "cannot open $tmp: $!";
+
+ return "basic DB_File sanity check failed"
+ if list_diff(\@array, \@h);
+
+ # Output from splice():
+ # Returned value (munged a bit), error msg, warnings
+ #
+ my ($s_r, $s_error, @s_warnings);
+
+ my $gather_warning = sub { push @s_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @array, $offset, $length, @list;
+ };
+ $s_error = $@;
+ $s_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($s_error, @s_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.$//;
+ }
+
+ # Now do the same for DB_File's version of splice
+ my ($ms_r, $ms_error, @ms_warnings);
+ $gather_warning = sub { push @ms_warnings, $_[0] };
+ if ($context eq 'list') {
+ my @r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ @r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = \@r;
+ }
+ elsif ($context eq 'scalar') {
+ my $r;
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ $r = splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [ $r ];
+ }
+ elsif ($context eq 'void') {
+ eval {
+ local $SIG{__WARN__} = $gather_warning;
+ splice @h, $offset, $length, @list;
+ };
+ $ms_error = $@;
+ $ms_r = [];
+ }
+ else {
+ die "bad context $context";
+ }
+
+ foreach ($ms_error, @ms_warnings) {
+ chomp;
+ s/ at \S+ line \d+\.?$//;
+ }
+
+ return "different errors: '$s_error' vs '$ms_error'"
+ if $s_error ne $ms_error;
+    # Data::Dumper is only require'd by the caller on failure, so load it
+    # here before using it on these failure paths.
+    require Data::Dumper;
+    return('different return values: ' . Data::Dumper::Dumper($s_r) . ' vs ' . Data::Dumper::Dumper($ms_r))
+	if list_diff($s_r, $ms_r);
+    return('different changed list: ' . Data::Dumper::Dumper(\@array) . ' vs ' . Data::Dumper::Dumper(\@h))
+	if list_diff(\@array, \@h);
+
+ if ((scalar @s_warnings) != (scalar @ms_warnings)) {
+ return 'different number of warnings';
+ }
+
+ while (@s_warnings) {
+ my $sw = shift @s_warnings;
+ my $msw = shift @ms_warnings;
+
+ if (defined $sw and defined $msw) {
+ $msw =~ s/ \(.+\)$//;
+ $msw =~ s/ in splice$// if $] < 5.006;
+ if ($sw ne $msw) {
+ return "different warning: '$sw' vs '$msw'";
+ }
+ }
+ elsif (not defined $sw and not defined $msw) {
+ # Okay.
+ }
+ else {
+ return "one warning defined, another undef";
+ }
+ }
+
+ undef $H;
+ untie @h;
+
+ open(TEXT, $tmp) or die "cannot open $tmp: $!";
+ @h = <TEXT>; chomp @h;
+ close TEXT or die "cannot close $tmp: $!";
+    return('list is different when re-read from disk: '
+	   . Data::Dumper::Dumper(\@array) . ' vs ' . Data::Dumper::Dumper(\@h))
+	if list_diff(\@array, \@h);
+
+ return undef; # success
+}
+
+
+# list_diff()
+#
+# Do two lists differ?
+#
+# Parameters:
+# reference to first list
+# reference to second list
+#
+# Returns true iff they differ. Only works for lists of (string or
+# undef).
+#
+# Surely there is a better way to do this?
+#
+sub list_diff {
+ die 'usage: list_diff(ref to first list, ref to second list)'
+ if @_ != 2;
+ my ($a, $b) = @_;
+ my @a = @$a; my @b = @$b;
+ return 1 if (scalar @a) != (scalar @b);
+ for (my $i = 0; $i < @a; $i++) {
+ my ($ae, $be) = ($a[$i], $b[$i]);
+ if (defined $ae and defined $be) {
+ return 1 if $ae ne $be;
+ }
+ elsif (not defined $ae and not defined $be) {
+ # Two undefined values are 'equal'
+ }
+ else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+# rand_test()
+#
+# Think up a random ARRAY, OFFSET, LENGTH, LIST, and context.
+# ARRAY or LIST might be empty, and OFFSET or LENGTH might be
+# undefined. Return a 'test' - a listref of these five things.
+#
+sub rand_test {
+ die 'usage: rand_test()' if @_;
+ my @contexts = qw<list scalar void>;
+ my $context = $contexts[int(rand @contexts)];
+ return [ rand_list(),
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ (rand() < 0.5) ? (int(rand(20)) - 10) : undef,
+ rand_list(),
+ $context ];
+}
+
+
+sub rand_list {
+ die 'usage: rand_list()' if @_;
+ my @r;
+
+ while (rand() > 0.1 * (scalar @r + 1)) {
+ push @r, rand_word();
+ }
+ return \@r;
+}
+
+
+sub rand_word {
+ die 'usage: rand_word()' if @_;
+ my $r = '';
+ my @chars = qw<a b c d e f g h i j k l m n o p q r s t u v w x y z>;
+ while (rand() > 0.1 * (length($r) + 1)) {
+ $r .= $chars[int(rand(scalar @chars))];
+ }
+ return $r;
+}
diff --git a/db/perl/DB_File/typemap b/db/perl/DB_File/typemap
new file mode 100644
index 000000000..55439ee76
--- /dev/null
+++ b/db/perl/DB_File/typemap
@@ -0,0 +1,44 @@
+# typemap for Perl 5 interface to Berkeley DB
+#
+# written by Paul Marquess <Paul.Marquess@btinternet.com>
+# last modified 10th December 2000
+# version 1.74
+#
+#################################### DB SECTION
+#
+#
+
+u_int T_U_INT
+DB_File T_PTROBJ
+DBT T_dbtdatum
+DBTKEY T_dbtkeydatum
+
+INPUT
+T_dbtkeydatum
+ ckFilter($arg, filter_store_key, \"filter_store_key\");
+ DBT_clear($var) ;
+ if (db->type != DB_RECNO) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+ else {
+ Value = GetRecnoKey(aTHX_ db, SvIV($arg)) ;
+ $var.data = & Value;
+ $var.size = (int)sizeof(recno_t);
+ }
+T_dbtdatum
+ ckFilter($arg, filter_store_value, \"filter_store_value\");
+ DBT_clear($var) ;
+ if (SvOK($arg)) {
+ $var.data = SvPV($arg, PL_na);
+ $var.size = (int)PL_na;
+ }
+
+OUTPUT
+
+T_dbtkeydatum
+ OutputKey($arg, $var)
+T_dbtdatum
+ OutputValue($arg, $var)
+T_PTROBJ
+ sv_setref_pv($arg, dbtype, (void*)$var);
diff --git a/db/perl/DB_File/version.c b/db/perl/DB_File/version.c
new file mode 100644
index 000000000..48c29a0e6
--- /dev/null
+++ b/db/perl/DB_File/version.c
@@ -0,0 +1,82 @@
+/*
+
+ version.c -- Perl 5 interface to Berkeley DB
+
+ written by Paul Marquess <Paul.Marquess@btinternet.com>
+ last modified 30th July 2001
+ version 1.78
+
+ All comments/suggestions/problems are welcome
+
+ Copyright (c) 1995-2001 Paul Marquess. All rights reserved.
+ This program is free software; you can redistribute it and/or
+ modify it under the same terms as Perl itself.
+
+ Changes:
+ 1.71 - Support for Berkeley DB version 3.
+               Support for Berkeley DB 2/3's backward compatibility mode.
+ 1.72 - No change.
+ 1.73 - Added support for threading
+ 1.74 - Added Perl core patch 7801.
+
+
+*/
+
+#define PERL_NO_GET_CONTEXT
+#include "EXTERN.h"
+#include "perl.h"
+#include "XSUB.h"
+
+#include <db.h>
+
+void
+#ifdef CAN_PROTOTYPE
+__getBerkeleyDBInfo(void)
+#else
+__getBerkeleyDBInfo()
+#endif
+{
+#ifdef dTHX
+ dTHX;
+#endif
+ SV * version_sv = perl_get_sv("DB_File::db_version", GV_ADD|GV_ADDMULTI) ;
+ SV * ver_sv = perl_get_sv("DB_File::db_ver", GV_ADD|GV_ADDMULTI) ;
+ SV * compat_sv = perl_get_sv("DB_File::db_185_compat", GV_ADD|GV_ADDMULTI) ;
+
+#ifdef DB_VERSION_MAJOR
+ int Major, Minor, Patch ;
+
+ (void)db_version(&Major, &Minor, &Patch) ;
+
+ /* Check that the versions of db.h and libdb.a are the same */
+ if (Major != DB_VERSION_MAJOR || Minor != DB_VERSION_MINOR
+ || Patch != DB_VERSION_PATCH)
+ croak("\nDB_File needs compatible versions of libdb & db.h\n\tyou have db.h version %d.%d.%d and libdb version %d.%d.%d\n",
+ DB_VERSION_MAJOR, DB_VERSION_MINOR, DB_VERSION_PATCH,
+ Major, Minor, Patch) ;
+
+ /* check that libdb is recent enough -- we need 2.3.4 or greater */
+ if (Major == 2 && (Minor < 3 || (Minor == 3 && Patch < 4)))
+ croak("DB_File needs Berkeley DB 2.3.4 or greater, you have %d.%d.%d\n",
+ Major, Minor, Patch) ;
+
+ {
+ char buffer[40] ;
+ sprintf(buffer, "%d.%d", Major, Minor) ;
+ sv_setpv(version_sv, buffer) ;
+ sprintf(buffer, "%d.%03d%03d", Major, Minor, Patch) ;
+ sv_setpv(ver_sv, buffer) ;
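+	/* For example, release 3.1.11 yields db_ver "3.001011"; the Perl
+	 * test scripts compare $DB_File::db_ver numerically in this form. */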
+ }
+
+#else /* ! DB_VERSION_MAJOR */
+ sv_setiv(version_sv, 1) ;
+ sv_setiv(ver_sv, 1) ;
+#endif /* ! DB_VERSION_MAJOR */
+
+#ifdef COMPAT185
+ sv_setiv(compat_sv, 1) ;
+#else /* ! COMPAT185 */
+ sv_setiv(compat_sv, 0) ;
+#endif /* ! COMPAT185 */
+
+}
diff --git a/db/rep/rep_method.c b/db/rep/rep_method.c
new file mode 100644
index 000000000..859158d2a
--- /dev/null
+++ b/db/rep/rep_method.c
@@ -0,0 +1,432 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: rep_method.c,v 1.23 2001/10/10 02:57:41 margo Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+/* XXX */
+#ifdef REP_DEBUG
+#include <pthread.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "log.h"
+#include "rep.h"
+
+#ifdef HAVE_RPC
+#include "rpc_client_ext.h"
+#endif
+
+static int __rep_elect __P((DB_ENV *,
+ int, int, u_int32_t, u_int32_t, int *, int *));
+static int __rep_elect_init __P((DB_ENV *, DB_LSN *, int, int, int *));
+static int __rep_set_rep_transport __P((DB_ENV *, int, void *,
+ int (*)(DB_ENV *, void *, const DBT *, DBT *, db_reptype_t, int)));
+static int __rep_start __P((DB_ENV *, DBT *, u_int32_t));
+static int __rep_wait __P((DB_ENV *,
+ u_int32_t, u_int32_t, int *, int *, u_int32_t));
+
+/*
+ * __rep_dbenv_create --
+ * Replication-specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_create __P((DB_ENV *));
+ */
+int
+__rep_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret;
+
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+	 * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ if ((ret = __os_calloc(dbenv, 1, sizeof(DB_REP), &db_rep)) != 0)
+ return (ret);
+ dbenv->rep_handle = db_rep;
+
+ /* Initialize the per-process replication structure. */
+ db_rep->rep_send = NULL;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->rep_elect = __dbcl_rep_elect;
+ dbenv->rep_start = __dbcl_rep_start;
+ dbenv->rep_process_message = __dbcl_rep_process_message;
+ dbenv->set_rep_transport = __dbcl_rep_set_rep_transport;
+ } else
+#endif
+ {
+ dbenv->rep_elect = __rep_elect;
+ dbenv->rep_process_message = __rep_process_message;
+ dbenv->rep_start = __rep_start;
+ dbenv->set_rep_transport = __rep_set_rep_transport;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_start --
+ * Become a master or client, and start sending messages to participate
+ * in the replication environment. Must be called after the environment
+ * is open.
+ */
+static int
+__rep_start(dbenv, dbt, flags)
+ DB_ENV *dbenv;
+ DBT *dbt;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int announce, init_db, ret;
+
+ ENV_ILLEGAL_BEFORE_OPEN(dbenv, "rep_start");
+ PANIC_CHECK(dbenv);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ if ((ret = __db_fchk(dbenv, "DB_ENV->rep_start", flags,
+ DB_REP_CLIENT | DB_REP_LOGSONLY | DB_REP_MASTER)) != 0)
+ return (ret);
+
+ /* Exactly one of CLIENT and MASTER must be specified. */
+ if ((!LF_ISSET(DB_REP_CLIENT) && (!LF_ISSET(DB_REP_MASTER))) ||
+ (LF_ISSET(DB_REP_CLIENT) && LF_ISSET(DB_REP_MASTER)))
+ return (__db_ferr(dbenv, "DB_ENV->rep_start", 1));
+
+ /* Masters can't be logs-only. */
+ if (LF_ISSET(DB_REP_MASTER) && LF_ISSET(DB_REP_LOGSONLY))
+ return (__db_ferr(dbenv, "DB_ENV->rep_start", 1));
+
+ /* We need a transport function. */
+ if (db_rep->rep_send == NULL) {
+ __db_err(dbenv,
+ "DB_ENV->set_rep_transport must be called before %s",
+ "DB_ENV->rep_start");
+ return (EINVAL);
+ }
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ if (rep->eid == DB_INVALID_EID)
+ rep->eid = dbenv->rep_eid;
+
+ if (LF_ISSET(DB_REP_MASTER)) {
+ if (db_rep->rep_db != NULL) {
+ ret = db_rep->rep_db->close(db_rep->rep_db, DB_NOSYNC);
+ db_rep->rep_db = NULL;
+ }
+
+ F_CLR(dbenv, DB_ENV_REP_CLIENT);
+ if (!F_ISSET(rep, REP_F_MASTER)) {
+ /* Master is not yet set. */
+ if (F_ISSET(rep, REP_ISCLIENT)) {
+ F_CLR(rep, REP_ISCLIENT);
+ rep->gen = ++rep->w_gen;
+ } else if (rep->gen == 0)
+ rep->gen = 1;
+ }
+ F_SET(rep, REP_F_MASTER);
+ F_SET(dbenv, DB_ENV_REP_MASTER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ } else {
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER) ||
+ F_ISSET(rep, REP_F_MASTER)) {
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (DB_REP_NEWMASTER);
+ }
+
+ F_SET(dbenv, DB_ENV_REP_CLIENT);
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(dbenv, DB_ENV_REP_LOGSONLY);
+
+ announce = !F_ISSET(rep, REP_ISCLIENT) ||
+ rep->master_id == DB_INVALID_EID;
+ init_db = 0;
+ if (!F_ISSET(rep, REP_ISCLIENT)) {
+ if (LF_ISSET(DB_REP_LOGSONLY))
+ F_SET(rep, REP_F_LOGSONLY);
+ else
+ F_SET(rep, REP_F_UPGRADE);
+
+ /*
+ * We initialize the client's generation number to 0.
+ * Upon startup, it looks for a master and updates the
+ * generation number as necessary, exactly as it does
+ * during normal operation and a master failure.
+ */
+ rep->gen = 0;
+ rep->master_id = DB_INVALID_EID;
+ init_db = 1;
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if ((ret = __rep_client_dbinit(dbenv, init_db)) != 0)
+ return (ret);
+
+ /*
+ * If this client created a newly replicated environment,
+ * then announce the existence of this client. The master
+ * should respond with a message that will tell this client
+ * the current generation number and the current LSN. This
+ * will allow the client to either perform recovery or
+ * simply join in.
+ */
+ if (announce)
+ ret = __rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_NEWCLIENT, NULL, dbt, 0);
+ }
+ return (ret);
+}
+
+/*
+ * __rep_set_rep_transport --
+ * Set the transport function for replication.
+ */
+static int
+__rep_set_rep_transport(dbenv, eid, send_cookie, f_send)
+ DB_ENV *dbenv;
+ int eid;
+ void *send_cookie;
+ int (*f_send)(DB_ENV *,
+ void *, const DBT *, DBT *, u_int32_t, int);
+{
+ DB_REP *db_rep;
+ int ret;
+
+ ret = 0;
+ db_rep = dbenv->rep_handle;
+ if (db_rep == NULL)
+ return (EINVAL);
+
+ db_rep->rep_send = f_send;
+ db_rep->rep_send_data = send_cookie;
+
+ dbenv->rep_eid = eid;
+ return (ret);
+}
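
For context, the sketch below shows the shape of a transport callback an application might hand to DB_ENV->set_rep_transport; the argument order mirrors the call made from __rep_send_message (dbenv, cookie, rec, control, flags, eid). It is only an illustration: my_net_send() is a hypothetical messaging-layer hook, not a Berkeley DB function.

#include <db.h>

/* Hypothetical application messaging hook; not part of Berkeley DB. */
extern int my_net_send(void *cookie, int eid,
    const DBT *control, const DBT *rec, int need_flush);

/*
 * my_rep_send --
 *	Application-supplied replication transport callback.  "flags"
 *	carries DB_REP_PERMANENT when the record must reach stable
 *	storage; "eid" names the destination environment and may be a
 *	broadcast ID.
 */
static int
my_rep_send(DB_ENV *dbenv, void *cookie,
    const DBT *rec, DBT *control, u_int32_t flags, int eid)
{
	(void)dbenv;			/* Unused in this sketch. */

	/* Ship the control and record buffers to the destination. */
	return (my_net_send(cookie, eid, control, rec,
	    (flags & DB_REP_PERMANENT) ? 1 : 0));
}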
+
+/*
+ * __rep_elect --
+ * Called after master failure to hold/participate in an election for
+ * a new master.
+ */
+static int
+__rep_elect(dbenv, nsites, pri, wait, sleep, eidp, selfp)
+ DB_ENV *dbenv;
+ int nsites, pri;
+ u_int32_t wait, sleep;
+ int *eidp, *selfp;
+{
+ DB_LOG *dblp;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ REP *rep;
+ int in_progress, ret, send_vote;
+
+ *selfp = 0;
+
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ if ((ret = __rep_elect_init(dbenv,
+ &lsn, nsites, pri, &in_progress)) != 0)
+ return (ret);
+
+#ifdef REP_DEBUG
+if (!in_progress)
+ fprintf(stderr, "%lx Beginning an election\n", (long)pthread_self());
+#endif
+ if (!in_progress && (ret = __rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_ELECT, NULL, NULL, 0)) != 0)
+ goto err;
+
+ /* Now send vote */
+ if ((ret = __rep_send_vote(dbenv, &lsn, nsites, pri)) != 0)
+ goto err;
+
+ ret = __rep_wait(dbenv, wait, sleep, selfp, eidp, REP_F_EPHASE1);
+ switch (ret) {
+ case 0:
+ /* Check if election complete or phase complete. */
+ if (*eidp != DB_INVALID_EID)
+ return (0);
+ goto phase2;
+ case DB_TIMEOUT:
+ break;
+ default:
+ goto err;
+ }
+ /*
+ * If we got here, we haven't heard from everyone, but we've
+ * run out of time, so it's time to decide if we have enough
+ * votes to pick a winner and if so, to send out a vote to
+ * the winner.
+ */
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ send_vote = DB_INVALID_EID;
+ if (rep->sites > rep->nsites / 2) {
+ /* We think we've seen enough to cast a vote. */
+ send_vote = rep->winner;
+ if (rep->winner == rep->eid)
+ rep->votes++;
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (send_vote == DB_INVALID_EID) {
+ /* We do not have enough votes to elect. */
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Not enough votes to elect: received %d of %d\n",
+(long)pthread_self(), rep->sites, rep->nsites);
+#endif
+ ret = DB_REP_UNAVAIL;
+ goto err;
+
+ }
+#ifdef REP_DEBUG
+if (send_vote != rep->eid)
+ fprintf(stderr, "%lx Sending vote\n", (long)pthread_self());
+#endif
+
+ if (send_vote != rep->eid && (ret = __rep_send_message(dbenv,
+ send_vote, REP_VOTE2, NULL, NULL, 0)) != 0)
+ goto err;
+
+phase2: ret = __rep_wait(dbenv, wait, sleep, selfp, eidp, REP_F_EPHASE2);
+ switch (ret) {
+ case 0:
+ return (0);
+ case DB_TIMEOUT:
+ ret = DB_REP_UNAVAIL;
+ break;
+ default:
+ goto err;
+ }
+
+err: MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ ELECTION_DONE(rep);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Ended election with %d\n", (long)pthread_self(), ret);
+#endif
+ return (ret);
+}
+
+/*
+ * __rep_elect_init
+ * Initialize an election. Sets beginp non-zero if the election is
+ * already in progress; makes it 0 otherwise.
+ */
+int
+__rep_elect_init(dbenv, lsnp, nsites, pri, beginp)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, pri, *beginp;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret, *tally;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ ret = 0;
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ *beginp = IN_ELECTION(rep);
+ if (!*beginp) {
+ F_SET(rep, REP_F_EPHASE1);
+ rep->master_id = DB_INVALID_EID;
+ if (nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, nsites)) != 0)
+ goto err;
+ rep->nsites = nsites;
+ rep->priority = pri;
+ rep->votes = 0;
+
+ /* We have always heard from ourselves. */
+ rep->sites = 1;
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ tally[0] = rep->eid;
+
+ if (pri != 0) {
+ /* Make ourselves the winner to start. */
+ rep->winner = rep->eid;
+ rep->w_priority = pri;
+ rep->w_gen = rep->gen;
+ rep->w_lsn = *lsnp;
+ } else {
+ rep->winner = DB_INVALID_EID;
+ rep->w_priority = 0;
+ rep->w_gen = 0;
+ ZERO_LSN(rep->w_lsn);
+ }
+ }
+err: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+static int
+__rep_wait(dbenv, wait, sleep, selfp, eidp, flags)
+ DB_ENV *dbenv;
+ u_int32_t wait, sleep;
+ int *selfp, *eidp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int done, ret;
+
+ done = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ while (wait > 0) {
+ if ((ret = __os_sleep(dbenv, 0, sleep)) != 0)
+ return (ret);
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ done = !F_ISSET(rep, flags) && rep->master_id != DB_INVALID_EID;
+
+ *eidp = rep->master_id;
+ if (rep->eid == rep->master_id)
+ *selfp = 1;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (done)
+ return (0);
+
+ if (wait > sleep)
+ wait -= sleep;
+ else
+ wait = 0;
+ }
+ return (DB_TIMEOUT);
+}
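
As a usage note, an application might drive this election logic through the environment handle roughly as sketched below. The timeout values are illustrative only (they are consumed in microseconds by __rep_wait above), and handle_new_master() is a hypothetical application routine.

#include <db.h>

/* Hypothetical application hook run when another site wins. */
extern void handle_new_master(int eid);

int
run_election(DB_ENV *dbenv, int nsites, int priority)
{
	int eid, is_self, ret;

	/* Poll every 100 ms, giving up after roughly five seconds. */
	ret = dbenv->rep_elect(dbenv,
	    nsites, priority, 5000000, 100000, &eid, &is_self);
	if (ret != 0)
		return (ret);	/* E.g., DB_REP_UNAVAIL: too few votes. */

	if (is_self)
		/* This site won; switch the environment to master. */
		ret = dbenv->rep_start(dbenv, NULL, DB_REP_MASTER);
	else
		handle_new_master(eid);
	return (ret);
}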
diff --git a/db/rep/rep_record.c b/db/rep/rep_record.c
new file mode 100644
index 000000000..80c87d55e
--- /dev/null
+++ b/db/rep/rep_record.c
@@ -0,0 +1,1010 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: rep_record.c,v 1.43 2001/10/10 02:57:42 margo Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "log.h"
+#include "txn.h"
+#include "rep.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "db_shash.h"
+#include "lock.h"
+
+static int __rep_apply __P((DB_ENV *, REP_CONTROL *, DBT *));
+static int __rep_bt_cmp __P((DB *, const DBT *, const DBT *));
+static int __rep_process_txn __P((DB_ENV *, DBT *));
+
+#define IS_SIMPLE(R) \
+ ((R) != DB_txn_regop && (R) != DB_txn_ckp && (R) != DB_log_register)
+
+/*
+ * This is a bit of a hack. If we set the offset to
+ * be the sizeof the persistent log structure, then
+ * we'll match the correct LSN on the next log write
+ * and the guts of the log system will take care of
+ * actually writing the persistent header.
+ *
+ * And now it's even more of a hack. If lp->ready_lsn is [1][0], we need
+ * to "change" to the first log file (we currently have none).
+ * However, in this situation, we don't want to wind up at LSN
+ * [2][whatever], we want to wind up at LSN [1][whatever],
+ * so don't increment lsn.file.
+ */
+#define CHANGE_FILES do { \
+ if (!(lp->ready_lsn.file == 1 && lp->ready_lsn.offset == 0)) \
+ lp->ready_lsn.file = rp->lsn.file + 1; \
+ lp->ready_lsn.offset = sizeof(struct __log_persist) + \
+ sizeof(struct __hdr); \
+ if ((ret = __log_newfh(dblp)) != 0) \
+ goto err; \
+ /* Make this evaluate to a simple rectype. */ \
+ rectype = 0; \
+} while (0)
+
+/*
+ * __rep_process_message --
+ *
+ * This routine takes an incoming message and processes it.
+ *
+ * control: contains the control fields from the record
+ * rec: contains the actual record
+ * eidp: contains the machine id of the sender of the message;
+ * in the case of a DB_NEWMASTER message, returns the eid
+ * of the new master.
+ *
+ * PUBLIC: int __rep_process_message __P((DB_ENV *, DBT *, DBT *, int *));
+ */
+int
+__rep_process_message(dbenv, rec, control, eidp)
+ DB_ENV *dbenv;
+ DBT *rec, *control;
+ int *eidp;
+{
+ DBT *d, data_dbt, mylog;
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN lsn;
+ DB_REP *db_rep;
+ LOG *lp;
+ REP *rep;
+ REP_CONTROL *rp;
+ REP_VOTE_INFO *vi;
+ u_int32_t gen, type;
+ int done, i, master, eid, old, recovering, ret, t_ret, *tally;
+
+ PANIC_CHECK(dbenv);
+
+ ret = 0;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ gen = rep->gen;
+ eid = rep->eid;
+ recovering = F_ISSET(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ /*
+ * dbenv->rep_db is the handle for the repository used for applying log
+ * records.
+ */
+ rp = (REP_CONTROL *)control->data;
+
+ /*
+ * Check for generation number matching. Ignore any old messages
+ * except requests for ALIVE since the sender needs those to
+ * sync back up. If the message is newer, then we are out of
+ * sync and need to catch up with the rest of the system.
+ */
+ if (rp->gen < gen && rp->rectype != REP_ALIVE_REQ &&
+ rp->rectype != REP_NEWCLIENT)
+ return (0);
+ if (rp->gen > gen && rp->rectype != REP_ALIVE &&
+ rp->rectype != REP_NEWMASTER)
+ return (__rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_MASTER_REQ, NULL, NULL, 0));
+
+ /*
+ * We need to check if we're in recovery and if we are
+	 * If we're in recovery, we need to ignore any messages except
+	 * VERIFY, VOTE, ELECT (the master might fail while we are
+	 * recovering), and ALIVE_REQ.
+ */
+ if (recovering)
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ case REP_ALIVE_REQ:
+ case REP_ELECT:
+ case REP_NEWCLIENT:
+ case REP_NEWMASTER:
+ case REP_NEWSITE:
+ case REP_VERIFY:
+ case REP_VOTE1:
+ case REP_VOTE2:
+ break;
+ default:
+ return (0);
+ }
+
+ switch(rp->rectype) {
+ case REP_ALIVE:
+ ANYSITE(dbenv);
+ if (rp->gen > gen && rp->flags)
+ return (__rep_new_master(dbenv, rp, *eidp));
+ break;
+ case REP_ALIVE_REQ:
+ ANYSITE(dbenv);
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = ((LOG *)dblp->reginfo.primary)->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_ALIVE, &lsn, NULL,
+ F_ISSET(dbenv, DB_ENV_REP_MASTER) ? 1 : 0));
+ case REP_ALL_REQ:
+ MASTER_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ lsn = rp->lsn;
+ for (ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET);
+ ret == 0;
+ ret = logc->get(logc, &lsn, &data_dbt, DB_NEXT)) {
+ if (lsn.offset == 0)
+ /*
+ * We are starting a new file; we have to
+ * do that with a NEWFILE message, not a
+ * regular log message, else we end up with
+ * two persist structures in the log.
+ */
+ ret = __rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_NEWFILE,
+ &lsn, NULL, 0);
+ else
+ ret = __rep_send_message(dbenv, *eidp,
+ REP_LOG, &lsn, &data_dbt, 0);
+ if (ret != 0)
+ break;
+ }
+
+ if (ret == DB_NOTFOUND)
+ ret = 0;
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_ELECT:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ ret = IN_ELECTION(rep) ? 0 : DB_REP_HOLDELECTION;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+#ifdef NOTYET
+ case REP_FILE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_FILE_REQ:
+ MASTER_ONLY(dbenv);
+ return (__rep_send_file(dbenv, rec, *eidp));
+ break;
+#endif
+ case REP_LOG:
+ CLIENT_ONLY(dbenv);
+ return (__rep_apply(dbenv, rp, rec));
+ case REP_LOG_REQ:
+ MASTER_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if ((ret = logc->get(logc, &rp->lsn, &data_dbt, DB_SET)) == 0)
+ ret = __rep_send_message(
+ dbenv, *eidp, REP_LOG, &rp->lsn, &data_dbt, 0);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+ case REP_NEWSITE:
+ /* This is a rebroadcast; simply tell the application. */
+ return (DB_REP_NEWSITE);
+ case REP_NEWCLIENT:
+ /*
+ * This message was received and should have resulted in the
+ * application entering the machine ID in its machine table.
+ * We respond to this with an ALIVE to send relevant information
+ * to the new client. But first, broadcast the new client's
+ * record to all the clients.
+ */
+ if ((ret = __rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_NEWSITE, &rp->lsn, rec, 0)) != 0)
+ goto err;
+
+ if (F_ISSET(dbenv, DB_ENV_REP_CLIENT))
+ return (0);
+
+ /* FALLTHROUGH */
+ case REP_MASTER_REQ:
+ MASTER_ONLY(dbenv);
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ break;
+ case REP_NEWFILE:
+ CLIENT_ONLY(dbenv);
+ return (__rep_apply(dbenv, rp, rec));
+
+ case REP_NEWMASTER:
+ ANYSITE(dbenv);
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER) &&
+ *eidp != dbenv->rep_eid)
+ return (DB_REP_DUPMASTER);
+ return (__rep_new_master(dbenv, rp, *eidp));
+ break;
+ case REP_PAGE: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PAGE_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_PLIST: /* TODO */
+ CLIENT_ONLY(dbenv);
+ break;
+ case REP_PLIST_REQ: /* TODO */
+ MASTER_ONLY(dbenv);
+ break;
+ case REP_VERIFY:
+ CLIENT_ONLY(dbenv);
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ memset(&mylog, 0, sizeof(mylog));
+ if ((ret = logc->get(logc, &rp->lsn, &mylog, DB_SET)) != 0)
+ goto rep_verify_err;
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ if (mylog.size == rec->size &&
+ memcmp(mylog.data, rec->data, rec->size) == 0) {
+ ret = __db_apprec(dbenv, &rp->lsn, 0);
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ F_CLR(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv, rep->master_id,
+ REP_ALL_REQ, &rp->lsn, NULL, 0);
+ } else if ((ret = logc->get(logc, &lsn, &mylog, DB_PREV)) == 0)
+ ret = __rep_send_message(dbenv,
+ *eidp, REP_VERIFY_REQ, &lsn, NULL, 0);
+rep_verify_err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ goto err;
+ case REP_VERIFY_FAIL:
+ return (DB_REP_OUTDATED);
+ case REP_VERIFY_REQ:
+ MASTER_ONLY(dbenv);
+ type = REP_VERIFY;
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ d = &data_dbt;
+ memset(d, 0, sizeof(data_dbt));
+ ret = logc->get(logc, &rp->lsn, d, DB_SET);
+ /*
+		 * If the LSN was invalid, we might get DB_NOTFOUND, an
+		 * EIO, or anything else.  If we get DB_NOTFOUND, there is
+		 * a chance that the LSN comes before the first file
+		 * present, in which case we need to return a failure so
+		 * that the client can return DB_REP_OUTDATED.
+ */
+ if (ret == DB_NOTFOUND &&
+ __log_is_outdated(dbenv, rp->lsn.file, &old) == 0 &&
+ old != 0)
+ type = REP_VERIFY_FAIL;
+
+ if (ret != 0)
+ d = NULL;
+
+ ret = __rep_send_message(dbenv, *eidp, type, &rp->lsn, d, 0);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ goto err;
+ case REP_VOTE1:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Master received vote\n", (long)pthread_self());
+#endif
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+
+ vi = (REP_VOTE_INFO *)rec->data;
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+
+ /*
+ * If you get a vote and you're not in an election, simply
+ * return an indicator to hold an election which will trigger
+ * this site to send its vote again.
+ */
+ if (!IN_ELECTION(rep)) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Not in election, but received vote1\n", (long)pthread_self());
+#endif
+ ret = DB_REP_HOLDELECTION;
+ goto unlock;
+ }
+
+ if (F_ISSET(rep, REP_F_EPHASE2))
+ goto unlock;
+
+ /* Check if this site knows about more sites than we do. */
+ if (vi->nsites > rep->nsites)
+ rep->nsites = vi->nsites;
+
+ /* Check if we've heard from this site already. */
+ tally = R_ADDR((REGINFO *)dbenv->reginfo, rep->tally_off);
+ for (i = 0; i < rep->sites; i++) {
+ if (tally[i] == *eidp)
+ /* Duplicate vote. */
+ goto unlock;
+ }
+
+ /*
+		 * We are keeping this vote; let's see if that changes our
+		 * count of the number of sites.
+ */
+ if (rep->sites + 1 > rep->nsites)
+ rep->nsites = rep->sites + 1;
+ if (rep->nsites > rep->asites &&
+ (ret = __rep_grow_sites(dbenv, rep->nsites)) != 0)
+ goto unlock;
+
+ tally[rep->sites] = *eidp;
+ rep->sites++;
+
+ /*
+ * Change winners if the incoming record has a higher
+ * priority, or an equal priority but a larger LSN.
+ */
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Existing vote: (eid)%d (pri)%d (gen)%d [%d,%d]\n", (long)pthread_self(), rep->winner, rep->w_priority, rep->w_gen, rep->w_lsn.file, rep->w_lsn.offset);
+fprintf(stderr, "%lx Incoming vote: (eid)%d (pri)%d (gen)%d [%d,%d]\n", (long)pthread_self(), *eidp, vi->priority, rp->gen, rp->lsn.file, rp->lsn.offset);
+#endif
+ if (vi->priority > rep->w_priority ||
+ (vi->priority == rep->w_priority &&
+ log_compare(&rp->lsn, &rep->w_lsn) > 0)) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Taking new vote\n", (long)pthread_self());
+#endif
+ rep->winner = *eidp;
+ rep->w_priority = vi->priority;
+ rep->w_lsn = rp->lsn;
+ rep->w_gen = rp->gen;
+ }
+ master = rep->winner;
+ lsn = rep->w_lsn;
+ done = rep->sites == rep->nsites;
+ if (done) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Consider election phase1 done\n", (long)pthread_self());
+#endif
+ F_CLR(rep, REP_F_EPHASE1);
+ F_SET(rep, REP_F_EPHASE2);
+ }
+ if (done && master == rep->eid) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Entering phase 2 casting vote for ourselves.\n",
+(long)pthread_self());
+#endif
+ rep->votes++;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (0);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (done) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Entering phase 2 with voting for %d\n",
+(long)pthread_self(), master);
+#endif
+ /* Someone else gets the vote. */
+ return (__rep_send_message(dbenv, master, REP_VOTE2,
+ NULL, NULL, 0));
+ }
+ /* Election is still going on. */
+ break;
+ case REP_VOTE2:
+ if (F_ISSET(dbenv, DB_ENV_REP_MASTER)) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Master received vote\n", (long)pthread_self());
+#endif
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ return (__rep_send_message(dbenv,
+ *eidp, REP_NEWMASTER, &lsn, NULL, 0));
+ }
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Got a vote\n", (long)pthread_self());
+#endif
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ if (!IN_ELECTION(rep) && rep->master_id != DB_INVALID_EID) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Not in election, but received vote2\n", (long)pthread_self());
+#endif
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (DB_REP_HOLDELECTION);
+ }
+ /* avoid counting duplicates. */
+ rep->votes++;
+ done = rep->votes > rep->nsites / 2;
+ if (done) {
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Got enough votes to win\n", (long)pthread_self());
+#endif
+ rep->master_id = rep->eid;
+ rep->gen = rep->w_gen + 1;
+ ELECTION_DONE(rep);
+ F_CLR(rep, REP_F_UPGRADE);
+ F_SET(rep, REP_F_MASTER);
+ *eidp = rep->master_id;
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx Election done; winner is %d\n", (long)pthread_self(), rep->master_id);
+#endif
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ if (done) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ /* Declare me the winner. */
+#ifdef REP_DEBUG
+fprintf(stderr, "%lx I won, sending NEWMASTER\n", (long)pthread_self());
+#endif
+ if ((ret = __rep_send_message(dbenv, DB_BROADCAST_EID,
+ REP_NEWMASTER, &lsn, NULL, 0)) != 0)
+ break;
+ return (DB_REP_NEWMASTER);
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (0);
+
+unlock: MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+err: return (ret);
+}
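
To show how the special return values above are meant to be consumed, here is a hedged sketch of the receiving side of an application. It assumes the control and rec DBTs have already been pulled off the application's own transport; the comments paraphrase the cases handled in this function and are illustrative rather than prescriptive.

#include <db.h>

int
handle_incoming(DB_ENV *dbenv, DBT *control, DBT *rec, int eid)
{
	int ret;

	switch (ret = dbenv->rep_process_message(dbenv, rec, control, &eid)) {
	case 0:
		break;
	case DB_REP_NEWSITE:
		/* A new client announced itself; remember eid. */
		break;
	case DB_REP_HOLDELECTION:
		/* No master is known; call DB_ENV->rep_elect. */
		break;
	case DB_REP_NEWMASTER:
		/* eid now identifies the new master. */
		break;
	case DB_REP_DUPMASTER:
		/* Two masters exist; downgrade and re-elect. */
		break;
	case DB_REP_OUTDATED:
		/* Our logs predate the master's first log file; a fresh
		 * copy of the environment is needed before rejoining. */
		break;
	default:
		return (ret);	/* A real error. */
	}
	return (0);
}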
+
+/*
+ * __rep_apply --
+ *
+ * Handle incoming log records on a client, applying when possible and
+ * entering them into the bookkeeping table otherwise.  This routine is
+ * the guts of the state machine that describes how we process and
+ * manage incoming log records.
+ */
+static int
+__rep_apply(dbenv, rp, rec)
+ DB_ENV *dbenv;
+ REP_CONTROL *rp;
+ DBT *rec;
+{
+ __txn_ckp_args ckp_args;
+ DB_REP *db_rep;
+ DBT data_dbt, key_dbt;
+ DB *dbp;
+ DBC *dbc;
+ DB_LOG *dblp;
+ DB_LSN ckp_lsn, lsn, next_lsn;
+ LOG *lp;
+ int cmp, eid, ret, retry_count, t_ret;
+ u_int32_t rectype;
+
+ db_rep = dbenv->rep_handle;
+ dbp = db_rep->rep_db;
+ dbc = NULL;
+ ret = 0;
+ retry_count = 0;
+ memset(&key_dbt, 0, sizeof(key_dbt));
+ memset(&data_dbt, 0, sizeof(data_dbt));
+
+ /*
+ * If this is a log record and it's the next one in line, simply
+ * write it to the log. If it's a "normal" log record, i.e., not
+ * a COMMIT or CHECKPOINT or something that needs immediate processing,
+ * just return. If it's a COMMIT, CHECKPOINT or LOG_REGISTER (i.e.,
+ * not SIMPLE), handle it now. If it's a NEWFILE record, then we
+ * have to be prepared to deal with a logfile change.
+ */
+ dblp = dbenv->lg_handle;
+ R_LOCK(dbenv, &dblp->reginfo);
+ lp = dblp->reginfo.primary;
+ cmp = log_compare(&rp->lsn, &lp->ready_lsn);
+
+ /*
+ * This is written to assume that you don't end up with a lot of
+ * records after a hole. That is, it optimizes for the case where
+ * there is only a record or two after a hole. If you have a lot
+ * of records after a hole, what you'd really want to do is write
+ * all of them and then process all the commits, checkpoints, etc.
+ * together. That is more complicated processing that we can add
+ * later if necessary.
+ *
+ * That said, I really don't want to do db operations holding the
+ * log mutex, so the synchronization here is tricky.
+ */
+ if (cmp == 0) {
+ if (rp->rectype == REP_NEWFILE) {
+ CHANGE_FILES;
+ } else {
+ ret = __log_put_int(dbenv, &rp->lsn, rec, rp->flags);
+ lp->ready_lsn = lp->lsn;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ }
+ while (ret == 0 && IS_SIMPLE(rectype) &&
+ log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0) {
+ /*
+ * We just filled in a gap in the log record stream.
+ * Write subsequent records to the log.
+ */
+gap_check: R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+ if ((ret = dbc->c_get(dbc,
+ &key_dbt, &data_dbt, DB_RMW | DB_FIRST)) != 0)
+ goto err;
+ rp = (REP_CONTROL *)key_dbt.data;
+ rec = &data_dbt;
+ memcpy(&rectype, rec->data, sizeof(rectype));
+ R_LOCK(dbenv, &dblp->reginfo);
+ /*
+ * We need to check again, because it's possible that
+ * some other thread of control changed the waiting_lsn
+ * or removed that record from the database.
+ */
+ if (log_compare(&lp->ready_lsn, &rp->lsn) == 0) {
+				if (rp->rectype == REP_NEWFILE) {
+					CHANGE_FILES;
+				} else {
+					ret = __log_put_int(dbenv,
+					    &rp->lsn, &data_dbt, rp->flags);
+					lp->ready_lsn = lp->lsn;
+				}
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if ((ret = dbc->c_del(dbc, 0)) != 0)
+ goto err;
+ ret = dbc->c_get(dbc,
+ &key_dbt, &data_dbt, DB_NEXT);
+ if (ret != DB_NOTFOUND && ret != 0)
+ goto err;
+ lsn = ((REP_CONTROL *)key_dbt.data)->lsn;
+ if ((ret = dbc->c_close(dbc)) != 0)
+ goto err;
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (ret == DB_NOTFOUND) {
+ ZERO_LSN(lp->waiting_lsn);
+ break;
+ } else
+ lp->waiting_lsn = lsn;
+ }
+ }
+ } else if (cmp > 0) {
+ /*
+ * This record isn't in sequence; add it to the table and
+ * update waiting_lsn if necessary.
+ */
+ key_dbt.data = rp;
+ key_dbt.size = sizeof(*rp);
+ next_lsn = lp->lsn;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ ret = dbp->put(dbp, NULL, &key_dbt, rec, 0);
+
+ /* Request the LSN we are still waiting for. */
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ eid = db_rep->region->master_id;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ ret = __rep_send_message(dbenv, eid, REP_LOG_REQ,
+ &next_lsn, NULL, 0);
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (ret == 0)
+ if (log_compare(&rp->lsn, &lp->waiting_lsn) < 0)
+ lp->waiting_lsn = rp->lsn;
+ }
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (ret != 0 || cmp < 0 || (cmp == 0 && IS_SIMPLE(rectype)))
+ return (ret);
+
+ /*
+ * If we got here, then we've got a log record in rp and rec that
+ * we need to process.
+ */
+ switch(rectype) {
+ case DB_txn_ckp:
+ /* Sync the memory pool and write the log record. */
+ memcpy(&ckp_lsn, (u_int8_t *)rec->data +
+ ((u_int8_t *)&ckp_args.ckp_lsn - (u_int8_t *)&ckp_args),
+ sizeof(DB_LSN));
+retry: if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY)) {
+ ret = dbenv->memp_sync(dbenv, &ckp_lsn);
+ if (ret == DB_INCOMPLETE && retry_count < 4) {
+ (void)__os_sleep(dbenv, 1 << retry_count, 0);
+ retry_count++;
+ goto retry;
+ }
+ }
+ if (ret == 0) {
+ ret = dbenv->log_put(dbenv, &lsn, rec, rp->flags);
+ }
+ break;
+ case DB_log_register:
+ /* Simply redo the operation. */
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = __db_dispatch(dbenv,
+ NULL, rec, &rp->lsn, DB_TXN_APPLY, NULL);
+ break;
+ case DB_txn_regop:
+ if (!F_ISSET(dbenv, DB_ENV_REP_LOGSONLY))
+ ret = __rep_process_txn(dbenv, rec);
+ break;
+ default:
+ goto err;
+ }
+
+ /* Check if we need to go back into the table. */
+ if (ret == 0) {
+ R_LOCK(dbenv, &dblp->reginfo);
+ if (log_compare(&lp->ready_lsn, &lp->waiting_lsn) == 0)
+ goto gap_check;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ }
+
+err: if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+
+ return (ret);
+}
+
+/*
+ * __rep_process_txn --
+ *
+ * This is the routine that actually gets a transaction ready for
+ * processing.
+ */
+static int
+__rep_process_txn(dbenv, commit_rec)
+ DB_ENV *dbenv;
+ DBT *commit_rec;
+{
+ DBT data_dbt;
+ DB_LOCKREQ req, *lvp;
+ DB_LOGC *logc;
+ DB_LSN prev_lsn;
+ LSN_PAGE *ap;
+ TXN_RECS recs;
+ __txn_regop_args *txn_args;
+ u_int32_t op;
+ int i, ret, t_ret;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t dtabsize;
+
+ /*
+ * There are three phases: First, we have to traverse
+ * backwards through the log records gathering the list
+ * of all the pages accessed. Once we have this information
+ * we can acquire all the locks we need. Finally, we apply
+ * all the records in the transaction and release the locks.
+ */
+	dtab = NULL;
+	logc = NULL;
+	memset(&recs, 0, sizeof(recs));
+	memset(&data_dbt, 0, sizeof(data_dbt));
+
+ /* Make sure this is really a commit and not an abort! */
+ if ((ret = __txn_regop_read(dbenv, commit_rec->data, &txn_args)) != 0)
+ return (ret);
+	/* Copy out the fields we need before freeing txn_args. */
+	op = txn_args->opcode;
+	prev_lsn = txn_args->prev_lsn;
+	recs.txnid = txn_args->txnid->txnid;
+	__os_free(dbenv, txn_args, 0);
+	if (op != TXN_COMMIT)
+		return (0);
+
+ if ((ret = dbenv->lock_id(dbenv, &recs.lockid)) != 0)
+ return (ret);
+
+ /* Initialize the getpgno dispatch table. */
+ if ((ret = __rep_lockpgno_init(dbenv, &dtab, &dtabsize)) != 0)
+ goto err;
+
+ if ((ret = __rep_lockpages(dbenv,
+ dtab, NULL, &prev_lsn, &recs, recs.lockid)) != 0)
+ goto err;
+ if (recs.nalloc == 0)
+ goto err;
+
+ /* Phase 3: Apply updates and release locks. */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ for (ap = &recs.array[0], i = 0; i < recs.npages; i++, ap++) {
+ if ((ret = logc->get(logc, &ap->lsn, &data_dbt, DB_SET)) != 0)
+ goto err;
+ if ((ret = __db_dispatch(dbenv, NULL,
+ &data_dbt, &ap->lsn, DB_TXN_APPLY, NULL)) != 0)
+ goto err;
+ }
+
+err: if (recs.nalloc != 0) {
+ req.op = DB_LOCK_PUT_ALL;
+ if ((t_ret = dbenv->lock_vec(dbenv, recs.lockid,
+ DB_LOCK_FREE_LOCKER, &req, 1, &lvp)) != 0 && ret == 0)
+ ret = t_ret;
+ __os_free(dbenv, recs.array, recs.nalloc * sizeof(LSN_PAGE));
+ } else if ((t_ret =
+ dbenv->lock_id_free(dbenv, recs.lockid)) != 0 && ret == 0)
+ ret = t_ret;
+
+	if (logc != NULL && (t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_free(dbenv, data_dbt.data, 0);
+
+ if (dtab != NULL)
+ __os_free(dbenv, dtab, 0);
+
+ return (ret);
+}
+
+/*
+ * __rep_client_dbinit --
+ *
+ * Initialize the LSN database on the client side. This is called from the
+ * client initialization code. The startup flag value indicates if
+ * this is the first thread/process starting up and therefore should create
+ * the LSN database. This routine must be called once by each process acting
+ * as a client.
+ *
+ * PUBLIC: int __rep_client_dbinit __P((DB_ENV *, int));
+ */
+int
+__rep_client_dbinit(dbenv, startup)
+ DB_ENV *dbenv;
+ int startup;
+{
+ DB_REP *db_rep;
+ DB *rep_db;
+ int ret, t_ret;
+ u_int32_t flags;
+
+ PANIC_CHECK(dbenv);
+ db_rep = dbenv->rep_handle;
+ rep_db = NULL;
+
+#define REPDBNAME "__db.rep.db"
+
+ /* Check if this has already been called on this environment. */
+ if (db_rep->rep_db != NULL)
+ return (0);
+
+ if (startup) {
+ if ((ret = db_create(&rep_db, dbenv, 0)) != 0)
+ goto err;
+ /*
+ * Ignore errors, because if the file doesn't exist, this
+ * is perfectly OK.
+ */
+ (void)rep_db->remove(rep_db, REPDBNAME, NULL, 0);
+ }
+
+ if ((ret = db_create(&rep_db, dbenv, 0)) != 0)
+ goto err;
+ if ((ret = rep_db->set_bt_compare(rep_db, __rep_bt_cmp)) != 0)
+ goto err;
+
+ flags = DB_THREAD | (startup ? DB_CREATE : 0);
+ if ((ret = rep_db->open(rep_db,
+	    REPDBNAME, NULL, DB_BTREE, flags, 0)) != 0)
+ goto err;
+
+ /* Allow writes to this database on a client. */
+ F_SET(rep_db, DB_CL_WRITER);
+
+ db_rep->rep_db = rep_db;
+
+ return (0);
+err:
+ if (rep_db != NULL &&
+ (t_ret = rep_db->close(rep_db, DB_NOSYNC)) != 0 && ret == 0)
+ ret = t_ret;
+ db_rep->rep_db = NULL;
+
+ return (ret);
+}
+
+/*
+ * __rep_bt_cmp --
+ *
+ * Comparison function for the LSN table. We use the entire control
+ * structure as a key (for simplicity, so we don't have to merge the
+ * other fields in the control with the data field), but really only
+ * care about the LSNs.
+ */
+static int
+__rep_bt_cmp(dbp, dbt1, dbt2)
+ DB *dbp;
+ const DBT *dbt1, *dbt2;
+{
+ REP_CONTROL *rp1, *rp2;
+
+ COMPQUIET(dbp, NULL);
+
+ rp1 = dbt1->data;
+ rp2 = dbt2->data;
+
+ if (rp1->lsn.file > rp2->lsn.file)
+ return (1);
+
+ if (rp1->lsn.file < rp2->lsn.file)
+ return (-1);
+
+ if (rp1->lsn.offset > rp2->lsn.offset)
+ return (1);
+
+ if (rp1->lsn.offset < rp2->lsn.offset)
+ return (-1);
+
+ return (0);
+}
+
+/*
+ * __rep_send_message --
+ * This is a wrapper for sending a message. It takes care of constructing
+ * the REP_CONTROL structure and calling the user's specified send function.
+ *
+ * PUBLIC: int __rep_send_message __P((DB_ENV *, int,
+ * PUBLIC: u_int32_t, DB_LSN *, const DBT *, u_int32_t));
+ */
+int
+__rep_send_message(dbenv, eid, rtype, lsnp, dbtp, flags)
+ DB_ENV *dbenv;
+ int eid;
+ u_int32_t rtype;
+ DB_LSN *lsnp;
+ const DBT *dbtp;
+ u_int32_t flags;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ DBT cdbt, scrap_dbt;
+ REP_CONTROL cntrl;
+ u_int32_t send_flags;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+
+ /* Set up control structure. */
+ memset(&cntrl, 0, sizeof(cntrl));
+ if (lsnp == NULL)
+ ZERO_LSN(cntrl.lsn);
+ else
+ cntrl.lsn = *lsnp;
+ cntrl.rectype = rtype;
+ cntrl.flags = flags;
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ cntrl.gen = rep->gen;
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ memset(&cdbt, 0, sizeof(cdbt));
+ cdbt.data = &cntrl;
+ cdbt.size = sizeof(cntrl);
+
+ /* Don't assume the send function will be tolerant of NULL records. */
+ if (dbtp == NULL) {
+ memset(&scrap_dbt, 0, sizeof(DBT));
+ dbtp = &scrap_dbt;
+ }
+
+ send_flags = (FLUSH_ON_FLAG(flags) ? DB_REP_PERMANENT : 0);
+
+ return (db_rep->rep_send(dbenv, db_rep->rep_send_data, dbtp, &cdbt,
+ send_flags, eid));
+}
+
+#ifdef NOTYET
+static int __rep_send_file __P((DB_ENV *, DBT *, u_int32_t));
+/*
+ * __rep_send_file --
+ * Send an entire file, one block at a time.
+ */
+static int
+__rep_send_file(dbenv, rec, eid)
+ DB_ENV *dbenv;
+ DBT *rec;
+ u_int32_t eid;
+{
+ DB *dbp;
+ DB_LOCK lk;
+ DB_MPOOLFILE *mpf;
+ DBC *dbc;
+ DBT rec_dbt;
+ PAGE *pagep;
+ db_pgno_t last_pgno, pgno;
+ int ret, t_ret;
+
+ dbp = NULL;
+ dbc = NULL;
+ pagep = NULL;
+ mpf = NULL;
+ LOCK_INIT(lk);
+
+ if ((ret = db_create(&dbp, dbenv, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->open(dbp, rec->data, NULL, DB_UNKNOWN, 0, 0)) != 0)
+ goto err;
+
+ if ((ret = dbp->cursor(dbp, NULL, &dbc, 0)) != 0)
+ goto err;
+ /*
+	 * Force last_pgno to some value that will let us read the meta-data
+ * page in the following loop.
+ */
+ memset(&rec_dbt, 0, sizeof(rec_dbt));
+ last_pgno = 1;
+ for (pgno = 0; pgno <= last_pgno; pgno++) {
+ if ((ret = __db_lget(dbc, 0, pgno, DB_LOCK_READ, 0, &lk)) != 0)
+ goto err;
+
+ if ((ret = mpf->get(mpf, &pgno, 0, &pagep)) != 0)
+ goto err;
+
+ if (pgno == 0)
+ last_pgno = ((DBMETA *)pagep)->last_pgno;
+
+ rec_dbt.data = pagep;
+ rec_dbt.size = dbp->pgsize;
+ if ((ret = __rep_send_message(dbenv, eid,
+ REP_FILE, NULL, &rec_dbt, pgno == last_pgno)) != 0)
+ goto err;
+ ret = mpf->put(mpf, pagep, 0);
+ pagep = NULL;
+ if (ret != 0)
+ goto err;
+ ret = __LPUT(dbc, lk);
+ LOCK_INIT(lk);
+ if (ret != 0)
+ goto err;
+ }
+
+err: if (LOCK_ISSET(lk) && (t_ret = __LPUT(dbc, lk)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbc != NULL && (t_ret = dbc->c_close(dbc)) != 0 && ret == 0)
+ ret = t_ret;
+ if (pagep != NULL && (t_ret = mpf->put(mpf, pagep, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (dbp != NULL && (t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+#endif
diff --git a/db/rep/rep_region.c b/db/rep/rep_region.c
new file mode 100644
index 000000000..c42b43328
--- /dev/null
+++ b/db/rep/rep_region.c
@@ -0,0 +1,147 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: rep_region.c,v 1.13 2001/10/10 18:32:56 krinsky Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "rep.h"
+#include "log.h"
+
+/*
+ * __rep_region_init --
+ * Initialize the shared memory state for the replication system.
+ *
+ * PUBLIC: int __rep_region_init __P((DB_ENV *));
+ */
+int
+__rep_region_init(dbenv)
+ DB_ENV *dbenv;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ db_rep = dbenv->rep_handle;
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ ret = 0;
+
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ if (renv->rep_off == INVALID_ROFF) {
+ /* Must create the region. */
+ if ((ret = __db_shalloc(infop->addr,
+ sizeof(REP), MUTEX_ALIGN, &rep)) != 0)
+ goto err;
+ memset(rep, 0, sizeof(*rep));
+ rep->tally_off = INVALID_ROFF;
+ renv->rep_off = R_OFFSET(infop, rep);
+ if ((ret = __db_mutex_init(dbenv,
+ &rep->mutex, renv->rep_off, 0)) != 0)
+ goto err;
+
+ /* We have the region; fill in the values. */
+ rep->eid = DB_INVALID_EID;
+ rep->master_id = DB_INVALID_EID;
+ rep->gen = 0;
+ } else
+ rep = R_ADDR(infop, renv->rep_off);
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+
+ db_rep->mutexp = &rep->mutex;
+ db_rep->region = rep;
+
+ return (0);
+
+err: MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
+
+/*
+ * __rep_region_destroy --
+ * Destroy any system resources allocated in the replication region.
+ *
+ * PUBLIC: int __rep_region_destroy __P((DB_ENV *));
+ */
+int
+__rep_region_destroy(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ int ret;
+
+ ret = 0;
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL && db_rep->mutexp != NULL)
+ ret = __db_mutex_destroy(db_rep->mutexp);
+
+ return (ret);
+}
+
+/*
+ * __rep_dbenv_close --
+ * Replication-specific destruction of the DB_ENV structure.
+ *
+ * PUBLIC: int __rep_dbenv_close __P((DB_ENV *));
+ */
+int
+__rep_dbenv_close(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL) {
+ __os_free(dbenv, db_rep, sizeof(DB_REP));
+ dbenv->rep_handle = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_preclose --
+ * If we are a client, shut down our client database. Remember that the
+ * client database was opened in its own environment, not the environment
+ * for which it keeps track of information. Also, if we have a client
+ * database (i.e., rep_handle->rep_db) that means we were a client and
+ * could have applied file opens that need to be closed now. This could
+ * also mask errors where dbp's that weren't opened by us are still open,
+ * but we have no way of distinguishing the two.
+ *
+ * PUBLIC: int __rep_preclose __P((DB_ENV *));
+ */
+int
+__rep_preclose(dbenv)
+ DB_ENV *dbenv;
+{
+ DB *dbp;
+ DB_REP *db_rep;
+ int ret;
+
+ ret = 0;
+ db_rep = (DB_REP *)dbenv->rep_handle;
+
+ if (db_rep != NULL && (dbp = db_rep->rep_db) != NULL) {
+ __log_close_files(dbenv);
+ ret = dbp->close(dbp, 0);
+ db_rep->rep_db = NULL;
+ }
+
+ return (ret);
+}
diff --git a/db/rep/rep_util.c b/db/rep/rep_util.c
new file mode 100644
index 000000000..d54a14c0f
--- /dev/null
+++ b/db/rep/rep_util.c
@@ -0,0 +1,623 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: rep_util.c,v 1.25 2001/10/11 01:07:16 bostic Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <stdlib.h>
+#include <string.h>
+#endif
+
+#include "db_int.h"
+#include "log.h"
+#include "rep.h"
+#include "txn.h"
+#include "db_page.h"
+#include "db_am.h"
+#include "btree.h"
+#include "hash.h"
+#include "qam.h"
+#include "db_shash.h"
+#include "lock.h"
+
+/*
+ * rep_util.c:
+ * Miscellaneous replication-related utility functions, including
+ * those called by other subsystems.
+ */
+static int __rep_apply_thread __P((DB_ENV *,
+ int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ DBT *, DB_LSN *, TXN_RECS *));
+static int __rep_cmp_bylsn __P((const void *, const void *));
+static int __rep_cmp_bypage __P((const void *, const void *));
+
+/*
+ * __rep_check_alloc --
+ * Make sure the array of TXN_REC entries is of at least size n.
+ * (This function is called by the __*_getpgnos() functions in
+ * *.src.)
+ *
+ * PUBLIC: int __rep_check_alloc __P((DB_ENV *, TXN_RECS *, int));
+ */
+int
+__rep_check_alloc(dbenv, r, n)
+ DB_ENV *dbenv;
+ TXN_RECS *r;
+ int n;
+{
+ int nalloc, ret;
+
+ while (r->nalloc < r->npages + n) {
+ nalloc = r->nalloc == 0 ? 20 : r->nalloc * 2;
+
+ if ((ret = __os_realloc(dbenv, nalloc * sizeof(LSN_PAGE),
+ &r->array)) != 0)
+ return (ret);
+
+ r->nalloc = nalloc;
+ }
+
+ return (0);
+}
+
+/*
+ * __rep_new_master --
+ * Called after a master election to sync back up with a new master.
+ * It's possible that we already know of this new master in which case
+ * we don't need to do anything.
+ *
+ * This is written assuming that this message came from the master; we
+ * need to enforce that in __rep_process_record, but right now, we have
+ * no way to identify the master.
+ *
+ * PUBLIC: int __rep_new_master __P((DB_ENV *, REP_CONTROL *, int));
+ */
+int
+__rep_new_master(dbenv, cntrl, eid)
+ DB_ENV *dbenv;
+ REP_CONTROL *cntrl;
+ int eid;
+{
+ DB_LOG *dblp;
+ DB_LOGC *logc;
+ DB_LSN last_lsn, lsn;
+ DB_REP *db_rep;
+ DBT dbt;
+ LOG *lp;
+ REP *rep;
+ int change, ret, t_ret;
+
+ db_rep = dbenv->rep_handle;
+ rep = db_rep->region;
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ ELECTION_DONE(rep);
+ change = rep->gen != cntrl->gen || rep->master_id != eid;
+ if (change) {
+ rep->gen = cntrl->gen;
+ rep->master_id = eid;
+ F_SET(rep, REP_F_RECOVER);
+ }
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (!change)
+ return (0);
+
+ /*
+ * If the master changed, we need to start the process of
+ * figuring out what our last valid log record is. However,
+ * if both the master and we agree that the max LSN is 0,0,
+ * then there is no recovery to be done. If we are at 0 and
+ * the master is not, then we just need to request all the log
+ * records from the master.
+ */
+ dblp = dbenv->lg_handle;
+ lp = dblp->reginfo.primary;
+ R_LOCK(dbenv, &dblp->reginfo);
+ last_lsn = lsn = lp->lsn;
+ if (last_lsn.offset > sizeof(LOGP))
+ last_lsn.offset -= lp->len;
+ R_UNLOCK(dbenv, &dblp->reginfo);
+ if (IS_INIT_LSN(lsn)) {
+empty: MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ F_CLR(rep, REP_F_RECOVER);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+
+ if (IS_INIT_LSN(cntrl->lsn))
+ ret = 0;
+ else
+ ret = __rep_send_message(dbenv, rep->master_id,
+ REP_ALL_REQ, &lsn, NULL, 0);
+
+ if (ret == 0)
+ ret = DB_REP_NEWMASTER;
+ return (ret);
+ } else if (last_lsn.offset <= sizeof(LOGP)) {
+ /*
+ * We have just changed log files and need to set lastlsn
+ * to the last record in the previous log files.
+ */
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+ memset(&dbt, 0, sizeof(dbt));
+ ret = logc->get(logc, &last_lsn, &dbt, DB_LAST);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret == DB_NOTFOUND)
+ goto empty;
+ if (ret != 0)
+ return (ret);
+ }
+
+ if ((ret = __rep_send_message(dbenv,
+ eid, REP_VERIFY_REQ, &last_lsn, NULL, 0)) != 0)
+ return (ret);
+
+ return (DB_REP_NEWMASTER);
+}
+
+/*
+ * __rep_lockpgno_init
+ * Create a dispatch table for acquiring locks on each log record.
+ *
+ * PUBLIC: int __rep_lockpgno_init __P((DB_ENV *,
+ * PUBLIC: int (***)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: size_t *));
+ */
+int
+__rep_lockpgno_init(dbenv, dtabp, dtabsizep)
+ DB_ENV *dbenv;
+ int (***dtabp)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ size_t *dtabsizep;
+{
+ int ret;
+
+ /* Initialize dispatch table. */
+ *dtabsizep = 0;
+ *dtabp = NULL;
+ if ((ret = __bam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __crdel_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __db_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __qam_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __ham_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __log_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0 ||
+ (ret = __txn_init_getpgnos(dbenv, dtabp, dtabsizep)) != 0)
+ return (ret);
+
+ return (0);
+}
+
+/*
+ * __rep_unlockpages --
+ * Unlock the pages locked in __rep_lockpages.
+ *
+ * PUBLIC: int __rep_unlockpages __P((DB_ENV *, u_int32_t));
+ */
+int
+__rep_unlockpages(dbenv, lid)
+ DB_ENV *dbenv;
+ u_int32_t lid;
+{
+ DB_LOCKREQ req, *lvp;
+
+ req.op = DB_LOCK_PUT_ALL;
+ return (dbenv->lock_vec(dbenv, lid, 0, &req, 1, &lvp));
+}
+
+/*
+ * __rep_lockpages --
+ * Called to gather and lock pages in preparation for both
+ * single transaction apply as well as client synchronization
+ * with a new master. A non-NULL key_lsn means that we're locking
+ * in order to apply a single log record during client recovery
+ * to the joint LSN. A non-NULL max_lsn means that we are applying
+ * a transaction whose commit is at max_lsn.
+ *
+ * PUBLIC: int __rep_lockpages __P((DB_ENV *,
+ * PUBLIC: int (**)(DB_ENV *, DBT *, DB_LSN *, db_recops, void *),
+ * PUBLIC: DB_LSN *, DB_LSN *, TXN_RECS *, u_int32_t));
+ */
+int
+__rep_lockpages(dbenv, dtab, key_lsn, max_lsn, recs, lid)
+ DB_ENV *dbenv;
+ int (**dtab)__P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ DB_LSN *key_lsn, *max_lsn;
+ TXN_RECS *recs;
+ u_int32_t lid;
+{
+ DBT data_dbt, lo;
+ DB_LOCK l;
+ DB_LOCKREQ *lvp;
+ DB_LOGC *logc;
+ DB_LSN tmp_lsn;
+ TXN_RECS tmp, *t;
+ db_pgno_t cur_pgno;
+ linfo_t locks;
+ int i, ret, t_ret, unique;
+ u_int32_t cur_fid;
+
+ /*
+ * There are two phases: First, we have to traverse backwards through
+ * the log records gathering the list of all the pages accessed. Once
+ * we have this information we can acquire all the locks we need.
+ */
+
+ /* Initialization */
+ memset(&locks, 0, sizeof(locks));
+ ret = 0;
+
+ t = recs != NULL ? recs : &tmp;
+ t->npages = t->nalloc = 0;
+ t->array = NULL;
+
+ /*
+ * We've got to be in one mode or the other; else life will either
+ * be excessively boring or overly exciting.
+ */
+ DB_ASSERT(key_lsn != NULL || max_lsn != NULL);
+ DB_ASSERT(key_lsn == NULL || max_lsn == NULL);
+
+ /*
+ * Phase 1: Fill in the pgno array.
+ */
+ memset(&data_dbt, 0, sizeof(data_dbt));
+ if (F_ISSET(dbenv, DB_ENV_THREAD))
+ F_SET(&data_dbt, DB_DBT_REALLOC);
+
+ /* Single transaction apply. */
+ if (max_lsn != NULL) {
+ tmp_lsn = *max_lsn;
+ if ((ret = __rep_apply_thread(dbenv, dtab,
+ &data_dbt, &tmp_lsn, t)) != 0)
+ goto err;
+ }
+
+ /* In recovery. */
+ if (key_lsn != NULL) {
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ goto err;
+ ret = logc->get(logc, key_lsn, &data_dbt, DB_SET);
+ if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ if (ret != 0)
+ goto err;
+
+ /* Save lsn values, since dispatch functions can change them. */
+ tmp_lsn = *key_lsn;
+ if ((ret = __db_dispatch(dbenv, dtab,
+ &data_dbt, &tmp_lsn, DB_TXN_APPLY, t)) != 0)
+ goto err;
+ }
+ if (t->npages == 0)
+ goto out;
+
+ /* Phase 2: Write lock all the pages. */
+
+ /* Sort the entries in the array by page number. */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bypage);
+
+ /* Count the number of unique pages. */
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ unique = 0;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ unique++;
+ }
+ }
+
+ if (unique == 0)
+ goto out;
+
+ /* Handle single lock case specially, else allocate space for locks. */
+ if (unique == 1) {
+ memset(&lo, 0, sizeof(lo));
+ lo.data = &t->array[i].pgdesc;
+		lo.size = sizeof(t->array[0].pgdesc);
+ ret = dbenv->lock_get(dbenv, lid, 0, &lo, DB_LOCK_WRITE, &l);
+ goto out2;
+ }
+
+ /* Multi-lock case. */
+ locks.n = unique;
+ if ((ret = __os_calloc(dbenv,
+ unique, sizeof(DB_LOCKREQ), &locks.reqs)) != 0)
+ goto err;
+ if ((ret = __os_calloc(dbenv, unique, sizeof(DBT), &locks.objs)) != 0)
+ goto err;
+
+ unique = 0;
+ cur_fid = DB_LOGFILEID_INVALID;
+ cur_pgno = PGNO_INVALID;
+ for (i = 0; i < t->npages; i++) {
+ if (F_ISSET(&t->array[i], LSN_PAGE_NOLOCK))
+ continue;
+ if (t->array[i].pgdesc.pgno != cur_pgno ||
+ t->array[i].fid != cur_fid) {
+ cur_pgno = t->array[i].pgdesc.pgno;
+ cur_fid = t->array[i].fid;
+ locks.reqs[unique].op = DB_LOCK_GET;
+ locks.reqs[unique].mode = DB_LOCK_WRITE;
+ locks.reqs[unique].obj = &locks.objs[unique];
+ locks.objs[unique].data = &t->array[i].pgdesc;
+			locks.objs[unique].size = sizeof(t->array[i].pgdesc);
+ unique++;
+ }
+ }
+
+ /* Finally, get the locks. */
+ if ((ret =
+ dbenv->lock_vec(dbenv, lid, 0, locks.reqs, unique, &lvp)) != 0)
+ goto err;
+
+ if (0) {
+ /*
+ * If we finished successfully, then we need to retain
+ * the locks, but we can free everything else up, because
+ * we can do a release by locker-id.
+ */
+err: if ((t_ret = __rep_unlockpages(dbenv, lid)) != 0 && ret == 0)
+ ret = t_ret;
+ }
+
+out: if (locks.objs != NULL)
+ __os_free(dbenv, locks.objs, locks.n * sizeof(DBT));
+ if (locks.reqs != NULL)
+ __os_free(dbenv, locks.reqs, locks.n * sizeof(DB_LOCKREQ));
+
+ /*
+ * Before we return, sort by LSN so that we apply records in the
+ * right order.
+ */
+ qsort(t->array, t->npages, sizeof(LSN_PAGE), __rep_cmp_bylsn);
+
+out2: if ((ret != 0 || recs == NULL) && t->nalloc != 0)
+ __os_free(dbenv, t->array, t->nalloc * sizeof(LSN_PAGE));
+
+ if (F_ISSET(&data_dbt, DB_DBT_REALLOC) && data_dbt.data != NULL)
+ __os_free(dbenv, data_dbt.data, 0);
+
+ return (ret);
+}
+
+/*
+ * __rep_cmp_bypage and __rep_cmp_bylsn --
+ * Sort functions for qsort. "bypage" sorts first by page numbers and
+ * then by the LSN. "bylsn" sorts first by the LSN, then by page numbers.
+ */
+static int
+__rep_cmp_bypage(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ return (0);
+}
+
+static int
+__rep_cmp_bylsn(a, b)
+ const void *a, *b;
+{
+ LSN_PAGE *ap, *bp;
+
+ ap = (LSN_PAGE *)a;
+ bp = (LSN_PAGE *)b;
+
+ if (ap->lsn.file < bp->lsn.file)
+ return (-1);
+
+ if (ap->lsn.file > bp->lsn.file)
+ return (1);
+
+ if (ap->lsn.offset < bp->lsn.offset)
+ return (-1);
+
+ if (ap->lsn.offset > bp->lsn.offset)
+ return (1);
+
+ if (ap->fid < bp->fid)
+ return (-1);
+
+ if (ap->fid > bp->fid)
+ return (1);
+
+ if (ap->pgdesc.pgno < bp->pgdesc.pgno)
+ return (-1);
+
+ if (ap->pgdesc.pgno > bp->pgdesc.pgno)
+ return (1);
+
+ return (0);
+}
+
+/*
+ * __rep_apply_thread
+ * Recursive function that will let us visit every entry in a transaction
+ * chain including all child transactions so that we can then apply
+ * the entire transaction family at once.
+ */
+static int
+__rep_apply_thread(dbenv, dtab, datap, lsnp, recp)
+ DB_ENV *dbenv;
+ int (**dtab) __P((DB_ENV *, DBT *, DB_LSN *, db_recops, void *));
+ DBT *datap;
+ DB_LSN *lsnp;
+ TXN_RECS *recp;
+{
+ __txn_child_args *argp;
+ DB_LOGC *logc;
+ DB_LSN c_lsn;
+ u_int32_t rectype;
+ int ret, t_ret;
+
+ if ((ret = dbenv->log_cursor(dbenv, &logc, 0)) != 0)
+ return (ret);
+
+ while (!IS_ZERO_LSN(*lsnp) &&
+ (ret = logc->get(logc, lsnp, datap, DB_SET)) == 0) {
+ memcpy(&rectype, datap->data, sizeof(rectype));
+ if (rectype == DB_txn_child) {
+ if ((ret = __txn_child_read(dbenv,
+ datap->data, &argp)) != 0)
+ goto err;
+ c_lsn = argp->c_lsn;
+ *lsnp = argp->prev_lsn;
+ __os_free(dbenv, argp, 0);
+ ret = __rep_apply_thread(dbenv,
+ dtab, datap, &c_lsn, recp);
+ } else {
+ ret = __db_dispatch(dbenv, dtab,
+ datap, lsnp, DB_TXN_APPLY, recp);
+ /*
+ * Explicitly copy the previous lsn since the
+ * page gathering routines don't modify it for you.
+ */
+ memcpy(lsnp, (u_int8_t *)datap->data +
+ sizeof(u_int32_t) + sizeof (DB_TXN *),
+ sizeof(DB_LSN));
+ }
+
+ if (ret != 0)
+ goto err;
+ }
+
+err: if ((t_ret = logc->close(logc, 0)) != 0 && ret == 0)
+ ret = t_ret;
+ return (ret);
+}
+
+/*
+ * __rep_is_client
+ * Used by other subsystems to figure out if this is a replication
+ *	client site.
+ *
+ * PUBLIC: int __rep_is_client __P((DB_ENV *));
+ */
+int
+__rep_is_client(dbenv)
+ DB_ENV *dbenv;
+{
+ DB_REP *db_rep;
+ REP *rep;
+ int ret;
+
+ if ((db_rep = dbenv->rep_handle) == NULL)
+ return (0);
+ rep = db_rep->region;
+
+ MUTEX_LOCK(dbenv, db_rep->mutexp, dbenv->lockfhp);
+ ret = F_ISSET(rep, REP_F_UPGRADE | REP_F_LOGSONLY);
+ MUTEX_UNLOCK(dbenv, db_rep->mutexp);
+ return (ret);
+}
+
+/*
+ * __rep_send_vote
+ * Send this site's vote for the election.
+ *
+ * PUBLIC: int __rep_send_vote __P((DB_ENV *, DB_LSN *, int, int));
+ */
+int
+__rep_send_vote(dbenv, lsnp, nsites, pri)
+ DB_ENV *dbenv;
+ DB_LSN *lsnp;
+ int nsites, pri;
+{
+ DBT vote_dbt;
+ REP_VOTE_INFO vi;
+
+ vi.priority = pri;
+ vi.nsites = nsites;
+
+ memset(&vote_dbt, 0, sizeof(vote_dbt));
+ vote_dbt.data = &vi;
+ vote_dbt.size = sizeof(vi);
+
+ return (__rep_send_message(dbenv,
+ DB_BROADCAST_EID, REP_VOTE1, lsnp, &vote_dbt, 0));
+}
+
+/*
+ * __rep_grow_sites --
+ * Called to allocate more space in the election tally information.
+ *	Called with the rep mutex held.  We need to acquire the region mutex,
+ *	so we must make sure that we *never* acquire those mutexes in the
+ * opposite order.
+ *
+ * PUBLIC: int __rep_grow_sites __P((DB_ENV *dbenv, int nsites));
+ */
+int
+__rep_grow_sites(dbenv, nsites)
+ DB_ENV *dbenv;
+ int nsites;
+{
+ REGENV *renv;
+ REGINFO *infop;
+ REP *rep;
+ int nalloc, ret, *tally;
+
+ rep = ((DB_REP *)dbenv->rep_handle)->region;
+
+ /*
+ * Allocate either twice the current allocation or nsites,
+ * whichever is more.
+ */
+
+ nalloc = 2 * rep->asites;
+ if (nalloc < nsites)
+ nalloc = nsites;
+
+ infop = dbenv->reginfo;
+ renv = infop->primary;
+ MUTEX_LOCK(dbenv, &renv->mutex, dbenv->lockfhp);
+ if ((ret = __db_shalloc(infop->addr,
+	    nalloc * sizeof(int), sizeof(int), &tally)) == 0) {
+ if (rep->tally_off != INVALID_ROFF)
+ __db_shalloc_free(infop->addr,
+ R_ADDR(infop, rep->tally_off));
+ rep->asites = nalloc;
+ rep->nsites = nsites;
+ rep->tally_off = R_OFFSET(infop, tally);
+ }
+ MUTEX_UNLOCK(dbenv, &renv->mutex);
+ return (ret);
+}
diff --git a/db/tcl/docs/rep.html b/db/tcl/docs/rep.html
new file mode 100644
index 000000000..fb29579ad
--- /dev/null
+++ b/db/tcl/docs/rep.html
@@ -0,0 +1,50 @@
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <title>Replication commands</title>
+</head>
+<body>
+
+<h2>
+<a NAME="Replication Commands"></a>Replication Commands</h2>
+Replication commands are invoked from the environment handle, after
+it has been opened with the appropriate flags defined
+<a href="./env.html">here</a>.<br>
+<hr WIDTH="100%">
+<p><b>> &lt;env> rep_process_message <i>machid</i> <i>control</i>
+<i>rec</i></b>
+<p>This command processes a single incoming replication message.&nbsp; It
+is a direct translation of the <a
+href="../../docs/api_c/rep_process_message.html">rep_process_message</a>
+function.&nbsp;
+It returns either 0 (for success) or a DB error message, or it throws a
+Tcl error with a system message.&nbsp; The arguments are:
+<ul>
+<li>
+<b>machid </b>is the machine ID of the machine that <i>sent</i> this
+message.</li>
+
+<li>
+<b>control</b> is a binary string containing the exact contents of the
+<b><i>control</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+
+<li>
+<b>rec</b> is a binary string containing the exact contents of the
+<b><i>rec</i></b> argument to the <b><i>sendproc</i></b> function
+that was passed this message on another site.</li>
+</ul>
+
+<hr WIDTH="100%">
+<br><b>> &lt;env> rep_elect <i>nsites</i> <i>pri</i> <i>wait</i>
+<i>sleep</i></b>
+<p>This command causes a replication election.&nbsp; It is a direct translation
+of the <a href="../../docs/api_c/rep_elect.html">rep_elect</a> function.&nbsp;
+Its arguments, all integers, correspond exactly to that C function's
+parameters.
+It will return a list containing two integers, which contain,
+respectively, the integer values returned in the C function's
+<i><b>midp</b></i> and <i><b>selfp</b></i> parameters.
+</body>
+</html>
diff --git a/db/test/dead006.tcl b/db/test/dead006.tcl
new file mode 100644
index 000000000..d6ede9cb4
--- /dev/null
+++ b/db/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: dead006.tcl,v 1.3 2001/10/11 16:15:30 sandstro Exp
+#
+# TEST dead006
+# TEST Use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/db/test/dead007.tcl b/db/test/dead007.tcl
new file mode 100644
index 000000000..f7fe42c21
--- /dev/null
+++ b/db/test/dead007.tcl
@@ -0,0 +1,35 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: dead007.tcl,v 1.1 2001/10/10 16:23:48 ubell Exp
+#
+# TEST dead007
+# TEST Test deadlock detection when locker ids wrap around.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+ puts "Dead007 -- complete"
+}
diff --git a/db/test/env010.tcl b/db/test/env010.tcl
new file mode 100644
index 000000000..18bf1a717
--- /dev/null
+++ b/db/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: env010.tcl,v 1.2 2001/08/03 16:39:24 bostic Exp
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+	# Do the test twice, for regular and catastrophic recovery.
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+		# The method doesn't matter, so we pick btree arbitrarily
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/db/test/lock004.tcl b/db/test/lock004.tcl
new file mode 100644
index 000000000..1976ac7a0
--- /dev/null
+++ b/db/test/lock004.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: lock004.tcl,v 11.1 2001/10/10 16:22:10 ubell Exp
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "Lock004.a -- locker id wrapping"
+ locktest -S [expr $lock_maxid - 1] $lock_maxid
+
+ puts "Lock004.b -- test out of locker ids"
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb env -create -lock -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ catch { $e lock_id } locker1
+ error_check_good locker1 [is_valid_locker $locker1] TRUE
+ error_check_good lock_id_set \
+ [$e lock_id_set [expr $lock_maxid - 1] $lock_maxid] 0
+
+ catch { $e lock_id } locker2
+ error_check_good locker2 [is_valid_locker $locker2] TRUE
+ catch { $e lock_id } locker3
+ error_check_bad locker3 [is_valid_locker $locker3] TRUE
+ error_check_good locker3 [string match "*wrapped*" $locker3] 1
+
+ catch { $e lock_id_free $locker1 } ret
+ error_check_good free $ret 0
+ catch { $e lock_id } locker4
+ error_check_good locker4 [is_valid_locker $locker4] TRUE
+
+ catch { $e lock_id_free $locker2 } ret
+ error_check_good free $ret 0
+ catch { $e lock_id_free $locker4 } ret
+ error_check_good free $ret 0
+
+ catch {$e close} ret
+ error_check_good close $ret 0
+}
diff --git a/db/test/parallel.tcl b/db/test/parallel.tcl
new file mode 100644
index 000000000..fdfc95d87
--- /dev/null
+++ b/db/test/parallel.tcl
@@ -0,0 +1,214 @@
+# Code to load up the tests into the Queue database
+# Id: parallel.tcl,v 11.9 2001/10/03 20:48:51 sandstro Exp
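+#
+# Usage sketch (hypothetical invocation; assumes the test suite has
+# already been sourced into a tclsh running in a build directory):
+#
+#	% run_parallel 4		;# four processes, full test list
+#	% run_parallel 4 50		;# four processes, 50 queued tests
+#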
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 100 -queue queue.db} ]
+ error_check_good dbopen [is_valid_widget $db db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems } {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ run_all -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems
+
+ set basedir [pwd]
+ set pidlist {}
+ set queuedir ../../[string range $basedir [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_widget $dbenv env] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 80 -queue queue.db} ]
+ error_check_good dbopen [is_valid_widget $db db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
+ while { $waitcnt < 5 } {
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num:\n"
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ if { [file exists $dir/berkeley_db_svc$EXE] } {
+ catch {eval file copy $dir/berkeley_db_svc$EXE $destdir}
+ }
+ }
+}
diff --git a/db/test/recd15scr.tcl b/db/test/recd15scr.tcl
new file mode 100644
index 000000000..53760e635
--- /dev/null
+++ b/db/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: recd15scr.tcl,v 1.3 2001/05/10 15:22:18 sue Exp
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/db/test/rep001.tcl b/db/test/rep001.tcl
new file mode 100644
index 000000000..57b9b97fb
--- /dev/null
+++ b/db/test/rep001.tcl
@@ -0,0 +1,214 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: rep001.tcl,v 11.2 2001/10/05 02:38:09 bostic Exp
+#
+# TEST rep001
+# TEST Replication smoke test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+
+
+proc rep001 { method args } {
+ source ./include.tcl
+ global testdir
+
+ env_cleanup $testdir
+
+ replsetup $testdir/REPDIR_MSGQUEUE
+
+ set masterdir $testdir/REPDIR_MASTER
+ set clientdir $testdir/REPDIR_CLIENT
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ puts "Rep001: Replication sanity test."
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb env -create \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [berkdb env -create \
+ -home $clientdir -txn -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Run a modified test001 in the master.
+ puts "\tRep001.a: Running test001 in replicated env."
+ eval rep_test001 $method 1000 "01" -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ puts "\tRep001.b: Verifying client database contents."
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test001.db $clientenv "" $testdir/t1 test001.check \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir "\tRep001.c:" 0 0 1
+}
+
+proc rep_test001 { method {nentries 10000} {tnum "01"} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tRep0$tnum: $method ($args) $nentries equal key/data pairs"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\t\tRep0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ set curtxn [$env txn]
+ set ret [eval {$db put} \
+ -txn $curtxn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ error_check_good txn_commit($key) [$curtxn commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\t\tRep0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good \tRep0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\t\tRep0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \tRep0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\t\tRep0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $txn $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \tRep0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
diff --git a/db/test/reputils.tcl b/db/test/reputils.tcl
new file mode 100644
index 000000000..71b88135c
--- /dev/null
+++ b/db/test/reputils.tcl
@@ -0,0 +1,104 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: reputils.tcl,v 11.2 2001/10/05 02:38:09 bostic Exp
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
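+# A hedged sketch of the typical flow (directory and handle names are
+# hypothetical; rep001.tcl shows a complete example):
+#
+#	replsetup $testdir/MSGQUEUEDIR
+#	repladd 1			;# register message queue for site 1
+#	repladd 2			;# register message queue for site 2
+#	# ... open master/client envs with -rep_transport {1 replsend}
+#	# and {2 replsend}, run transactions, then drain the queues:
+#	replprocessqueue $masterenv 1
+#	replprocessqueue $clientenv 2
+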
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set queueenv \
+ [berkdb env -create -cdb -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+proc replsend { control rec fromid toid } {
+ global queuedbs machids
+
+
+ # XXX
+ # -1 is DB_BROADCAST_MID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ puts stderr "FAIL: replsend: machid $toid not found"
+ return -1
+ }
+ set machlist [list $toid]
+ }
+
+ foreach m $machlist {
+		# XXX should a broadcast include "self"?
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+
+ $db put -append [list $control $rec $fromid]
+ }
+
+ return 0
+}
+
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) \
+ [berkdb open -env $queueenv -create -recno repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
+
+proc replprocessqueue { dbenv machid } {
+ global queuedbs
+
+ set nproced 0
+
+ set dbc [$queuedbs($machid) cursor -update]
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+
+ error_check_good process_message [$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]] 0
+
+ incr nproced
+
+ $dbc del
+ }
+
+ # Return the number of messages processed.
+ return $nproced
+}
diff --git a/db/test/rsrc004.tcl b/db/test/rsrc004.tcl
new file mode 100644
index 000000000..32c0cb738
--- /dev/null
+++ b/db/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: rsrc004.tcl,v 11.2 2001/08/03 16:39:29 bostic Exp
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/db/test/scr001/chk.code b/db/test/scr001/chk.code
new file mode 100644
index 000000000..7dd5a549c
--- /dev/null
+++ b/db/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# Id: chk.code,v 1.8 2001/10/12 17:55:31 bostic Exp
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/db/test/scr002/chk.def b/db/test/scr002/chk.def
new file mode 100644
index 000000000..508a69bb3
--- /dev/null
+++ b/db/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# Id: chk.def,v 1.7 2001/10/12 17:55:31 bostic Exp
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/include_auto/global_ext.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/db/test/scr003/chk.define b/db/test/scr003/chk.define
new file mode 100644
index 000000000..ea47734c8
--- /dev/null
+++ b/db/test/scr003/chk.define
@@ -0,0 +1,76 @@
+#!/bin/sh -
+#
+# Id: chk.define,v 1.15 2001/10/12 17:55:32 bostic Exp
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+
+egrep '^#define' $d/include/*.h $d/include/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^ALIGNP/d' \
+ -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ/d' \
+ -e '/^DB_AM_TXN/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_RO_ACCESS/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^LIST/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^POWER_OF_TWO/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^SH_CIRCLEQ/d' \
+ -e '/^SH_LIST/d' \
+ -e '/^SH_TAILQ/d' \
+ -e '/^TAILQ/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^XA_$/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/_AUTO_H$/d' \
+ -e '/_H_$/d' \
+ -e '/_UNUSED/d' \
+ -e '/^i_/d' \
+ -e '/ext_h_/d' \
+ -e 's/(.*//' | sort > $t1
+
+for i in `cat $t1`; do
+ if egrep -w $i $d/*/*.c $d/*/*.cpp > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/include/*.h $d/include/*.in |
+ sed 's;\.\.\/\.\.\/include/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort +1 > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
diff --git a/db/test/scr004/chk.javafiles b/db/test/scr004/chk.javafiles
new file mode 100644
index 000000000..248b5570b
--- /dev/null
+++ b/db/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# Id: chk.javafiles,v 1.4 2001/10/12 17:55:33 bostic Exp
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+d=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find ${d}/db/ $d/examples -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < ${f} | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/db/test/scr005/chk.nl b/db/test/scr005/chk.nl
new file mode 100644
index 000000000..876be2f94
--- /dev/null
+++ b/db/test/scr005/chk.nl
@@ -0,0 +1,100 @@
+#!/bin/sh -
+#
+# Id: chk.nl,v 1.5 2001/10/12 17:55:33 bostic Exp
+#
+# Check to make sure that there are no trailing newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+	int ch, exitv, line;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ if (ch == '\\\\')
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls with newline strings."
+ exit 1
+fi
+
+exit 0
diff --git a/db/test/scr006/chk.offt b/db/test/scr006/chk.offt
new file mode 100644
index 000000000..3dcc181f5
--- /dev/null
+++ b/db/test/scr006/chk.offt
@@ -0,0 +1,35 @@
+#!/bin/sh -
+#
+# Id: chk.offt,v 1.8 2001/10/12 17:55:34 bostic Exp
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/db/test/scr007/chk.proto b/db/test/scr007/chk.proto
new file mode 100644
index 000000000..74ce6875b
--- /dev/null
+++ b/db/test/scr007/chk.proto
@@ -0,0 +1,44 @@
+#!/bin/sh -
+#
+# Id: chk.proto,v 1.6 2001/10/12 17:55:34 bostic Exp
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep '__P' $d/include_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+for i in `cat $t1`; do
+ c=`egrep -low $i $d/include/*.in \
+ $d/include/*.h $d/*/*.c $d/*/*.cpp | wc -l`
+ echo "$i: $c"
+done | egrep ' 1$' > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/db/test/scr008/chk.pubdef b/db/test/scr008/chk.pubdef
new file mode 100644
index 000000000..95a30ca12
--- /dev/null
+++ b/db/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/include/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/include/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9]" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
diff --git a/db/test/scr009/chk.srcfiles b/db/test/scr009/chk.srcfiles
new file mode 100644
index 000000000..c9e6bff40
--- /dev/null
+++ b/db/test/scr009/chk.srcfiles
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# Id: chk.srcfiles,v 1.9 2001/10/12 17:55:35 bostic Exp
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^test/d' \
+ -e '/^build[^_]/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/db/test/scr010/chk.str b/db/test/scr010/chk.str
new file mode 100644
index 000000000..15eb9c299
--- /dev/null
+++ b/db/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# Id: chk.str,v 1.5 2001/10/12 17:55:36 bostic Exp
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+exit 0
diff --git a/db/test/scr010/spell.ok b/db/test/scr010/spell.ok
new file mode 100644
index 000000000..eb69020c8
--- /dev/null
+++ b/db/test/scr010/spell.ok
@@ -0,0 +1,723 @@
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Ahlm
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+BerkeleyDB
+BtRecExample
+Btree
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFh
+CONFIG
+CdFILTvX
+DBC
+DBENV
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+Db
+DbAppendRecno
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+FileNotFoundException
+GetJoin
+HOFFSET
+HOLDELECTION
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PID
+PREV
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SS
+Shm
+Sleepycat
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Txn
+Txns
+UID
+UNAVAIL
+USERMEM
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XxZ
+YY
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupsort
+efh
+eid
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+fv
+gbytes
+gc'ed
+gen
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+klNprRV
+klNprRs
+krinsky
+lM
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newitem
+newname
+newpage
+newpgno
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+olddata
+olditem
+opd
+opflags
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qam
+qs
+qtest
+rand
+rcuradj
+rdonly
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmw
+ro
+rootent
+rootlsn
+rpc
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+threadID
+tid
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/db/test/scr011/chk.tags b/db/test/scr011/chk.tags
new file mode 100644
index 000000000..cfdc9b8b8
--- /dev/null
+++ b/db/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# Id: chk.tags,v 1.10 2001/10/12 17:55:36 bostic Exp
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/db/test/scr012/chk.vx_code b/db/test/scr012/chk.vx_code
new file mode 100644
index 000000000..19250480d
--- /dev/null
+++ b/db/test/scr012/chk.vx_code
@@ -0,0 +1,62 @@
+#!/bin/sh -
+#
+# Id: chk.vx_code,v 1.3 2001/10/12 17:55:37 bostic Exp
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock \
+ db_dump db_load db_printlog db_recover db_stat db_upgrade db_verify; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d/include -I$d/include_auto \
+ t.c $d/clib/getopt.c $d/common/util_arg.c \
+ $d/common/util_log.c $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d/include -I$d/include_auto \
+ t.c $d/clib/getopt.c $d/common/util_arg.c \
+ $d/common/util_log.c $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/db/test/scr013/chk.stats b/db/test/scr013/chk.stats
new file mode 100644
index 000000000..9b9299710
--- /dev/null
+++ b/db/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# Id: chk.stats,v 1.4 2001/10/12 17:55:37 bostic Exp
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/include/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/api_c/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/api_c/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/api_c/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/api_c/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/api_c/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/api_c/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/api_c/txn_stat.so"
+
+# Check to make sure the elements from a man page appears in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/include/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/api_c/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/api_c/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/api_c/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/api_c/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/api_c/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
diff --git a/db/test/scr014/chk.err b/db/test/scr014/chk.err
new file mode 100644
index 000000000..e43e9206c
--- /dev/null
+++ b/db/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# Id: chk.err,v 1.2 2001/10/12 17:55:38 bostic Exp
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/include/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+(echo "<<< db.h >>> db_strerror" && diff $t1 $t2 && exit 1)
+
+exit 0
diff --git a/db/test/scr015/README b/db/test/scr015/README
new file mode 100644
index 000000000..9c8a42e86
--- /dev/null
+++ b/db/test/scr015/README
@@ -0,0 +1,36 @@
+# Id: README,v 1.1 2001/05/31 23:09:11 dda Exp
+
+Use the scripts testall or testone to run all, or just one of the C++
+tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use c++ in your path.  Set the environment variable $CXX
+to override this.  They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that -c is silently removed from
+$CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory.  This forces
+the proper options to be added to $LIBS, $CXXFLAGS and $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test.  If the test does not compile successfully, the compiler output
+is left in <name>.compileout.  Otherwise, the resulting program is run
+in a clean subdirectory using <name>.testin as input, or /dev/null if
+that file doesn't exist.  Output and error from the test run are put
+into <name>.out and <name>.err.  If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
diff --git a/db/test/scr015/TestConstruct01.cpp b/db/test/scr015/TestConstruct01.cpp
new file mode 100644
index 000000000..56c595f8d
--- /dev/null
+++ b/db/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,331 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestConstruct01.cpp,v 1.3 2001/10/05 01:50:17 bostic Exp
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(name, NULL, DB_BTREE,
+ count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+	// Make sure we get a message from the C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+ // old environment/database.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/db/test/scr015/TestConstruct01.testerr b/db/test/scr015/TestConstruct01.testerr
new file mode 100644
index 000000000..1ba627d10
--- /dev/null
+++ b/db/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/db/test/scr015/TestConstruct01.testout b/db/test/scr015/TestConstruct01.testout
new file mode 100644
index 000000000..9b840f9fc
--- /dev/null
+++ b/db/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/db/test/scr015/TestExceptInclude.cpp b/db/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 000000000..8f0c142e3
--- /dev/null
+++ b/db/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,25 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestExceptInclude.cpp,v 1.1 2001/05/31 23:09:12 dda Exp
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class.
+ *
+ * This program does nothing, it's just here to make sure
+ * the compilation works.
+ */
+#include "cxx_except.h"
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/db/test/scr015/TestGetSetMethods.cpp b/db/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 000000000..f6d66dcee
--- /dev/null
+++ b/db/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestGetSetMethods.cpp,v 1.3 2001/10/05 01:50:17 bostic Exp
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/db/test/scr015/TestKeyRange.cpp b/db/test/scr015/TestKeyRange.cpp
new file mode 100644
index 000000000..62592aee3
--- /dev/null
+++ b/db/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/,
+ * the only other changes are marked with comments that
+ * are notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestKeyRange.cpp,v 1.2 2001/10/09 20:58:35 dda Exp
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/db/test/scr015/TestKeyRange.testin b/db/test/scr015/TestKeyRange.testin
new file mode 100644
index 000000000..a2b6bd74e
--- /dev/null
+++ b/db/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/db/test/scr015/TestKeyRange.testout b/db/test/scr015/TestKeyRange.testout
new file mode 100644
index 000000000..25b2e1a83
--- /dev/null
+++ b/db/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/db/test/scr015/TestLogc.cpp b/db/test/scr015/TestLogc.cpp
new file mode 100644
index 000000000..d6d0003cd
--- /dev/null
+++ b/db/test/scr015/TestLogc.cpp
@@ -0,0 +1,92 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestLogc.cpp,v 1.3 2001/10/12 13:02:31 dda Exp
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open("first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open("second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+ cout << "logc.get: " << count;
+
+ // We ignore the contents of the log record,
+ // it's not portable.
+ //
+ // show_dbt(cout, dbt);
+ //
+
+ cout << "\n";
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** Failed to get log record, returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/db/test/scr015/TestLogc.testout b/db/test/scr015/TestLogc.testout
new file mode 100644
index 000000000..858947464
--- /dev/null
+++ b/db/test/scr015/TestLogc.testout
@@ -0,0 +1,18 @@
+logc.get: 0
+logc.get: 1
+logc.get: 2
+logc.get: 3
+logc.get: 4
+logc.get: 5
+logc.get: 6
+logc.get: 7
+logc.get: 8
+logc.get: 9
+logc.get: 10
+logc.get: 11
+logc.get: 12
+logc.get: 13
+logc.get: 14
+logc.get: 15
+logc.get: 16
+TestLogc done.
diff --git a/db/test/scr015/TestSimpleAccess.cpp b/db/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 000000000..fb8714cff
--- /dev/null
+++ b/db/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestSimpleAccess.cpp,v 1.3 2001/10/05 01:50:17 bostic Exp
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open("my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/db/test/scr015/TestSimpleAccess.testout b/db/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 000000000..dc88d4788
--- /dev/null
+++ b/db/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db/test/scr015/TestTruncate.cpp b/db/test/scr015/TestTruncate.cpp
new file mode 100644
index 000000000..49024299f
--- /dev/null
+++ b/db/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestTruncate.cpp,v 1.3 2001/10/05 01:50:17 bostic Exp
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open("my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/db/test/scr015/TestTruncate.testout b/db/test/scr015/TestTruncate.testout
new file mode 100644
index 000000000..0a4bc9816
--- /dev/null
+++ b/db/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db/test/scr015/chk.cxxtests b/db/test/scr015/chk.cxxtests
new file mode 100644
index 000000000..84b7901aa
--- /dev/null
+++ b/db/test/scr015/chk.cxxtests
@@ -0,0 +1,69 @@
+#!/bin/sh -
+#
+# Id: chk.cxxtests,v 1.2 2001/10/12 13:02:31 dda Exp
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+CXX=${CXX:-c++}
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../include"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/db/test/scr015/ignore b/db/test/scr015/ignore
new file mode 100644
index 000000000..cdf4c87c1
--- /dev/null
+++ b/db/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp
+#
+# A list of tests to ignore
diff --git a/db/test/scr015/testall b/db/test/scr015/testall
new file mode 100644
index 000000000..e9883c4c3
--- /dev/null
+++ b/db/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/db/test/scr015/testone b/db/test/scr015/testone
new file mode 100644
index 000000000..e9e8c6b93
--- /dev/null
+++ b/db/test/scr015/testone
@@ -0,0 +1,121 @@
+#!/bin/sh -
+# Id: testone,v 1.3 2001/09/11 19:14:16 dda Exp
+#
+# Run just one C++ regression test, the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/db/test/scr016/CallbackTest.java b/db/test/scr016/CallbackTest.java
new file mode 100644
index 000000000..587e6840e
--- /dev/null
+++ b/db/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open("test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, for example.
+ * In fact, it may not be legal, since it's not monotonically increasing.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/db/test/scr016/CallbackTest.testout b/db/test/scr016/CallbackTest.testout
new file mode 100644
index 000000000..23cd113a7
--- /dev/null
+++ b/db/test/scr016/CallbackTest.testout
@@ -0,0 +1,56 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/db/test/scr016/README b/db/test/scr016/README
new file mode 100644
index 000000000..0b8a87ef0
--- /dev/null
+++ b/db/test/scr016/README
@@ -0,0 +1,37 @@
+# Id: README,v 1.2 2001/05/31 23:09:10 dda Exp
+
+Use the scripts testall or testone to run all, or just one of the Java
+tests. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use javac and java in your path. Set environment
+variables $JAVAC and $JAVA to override this.  They will also honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_java-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, as with configure,
+to set the top of the Berkeley DB install directory.  This causes
+the proper directories to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test is not compiled successfully, the compiler output
+is left in <name>.compileout . Otherwise, the java program is run in
+a clean subdirectory using as input <name>.testin, or if that doesn't
+exist, /dev/null. Output and error from the test run are put into
+<name>.out, <name>.err . If <name>.testout, <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
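+
+For illustration only (TestHello is a hypothetical name, not part of
+this test suite), a minimal test following these conventions would be
+a single source file, using the same com.sleepycat.test package as the
+other tests here, plus an optional reference output file:
+
+    TestHello.java:
+
+        package com.sleepycat.test;
+
+        public class TestHello
+        {
+            public static void main(String[] args)
+            {
+                System.out.println("hello from TestHello");
+            }
+        }
+
+    TestHello.testout:
+
+        hello from TestHello
+
+Running ./testone TestHello should compile the class, run it with
+/dev/null as input, and report a difference only if its output does
+not match TestHello.testout.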
diff --git a/db/test/scr016/TestAppendRecno.java b/db/test/scr016/TestAppendRecno.java
new file mode 100644
index 000000000..5021ddbac
--- /dev/null
+++ b/db/test/scr016/TestAppendRecno.java
@@ -0,0 +1,259 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestAppendRecno.java,v 1.2 2001/10/05 02:36:08 bostic Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ //table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.set_append_recno(this);
+
+ table.open(FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+ // Here's an example of how you can extend a Dbt to store recno's.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
diff --git a/db/test/scr016/TestAppendRecno.testout b/db/test/scr016/TestAppendRecno.testout
new file mode 100644
index 000000000..970174e7a
--- /dev/null
+++ b/db/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/db/test/scr016/TestAssociate.java b/db/test/scr016/TestAssociate.java
new file mode 100644
index 000000000..3bf210831
--- /dev/null
+++ b/db/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestAssociate.java,v 1.2 2001/10/05 02:36:08 bostic Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+    // Prompts for a line, and keeps prompting until a non-blank
+    // line is returned.  Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table.set_dup_compare(this);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUP | Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(FileName + "2", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.associate(table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\ndef\njhi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
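+    /* Worked example, added for illustration and drawn from the
+       reference output (TestAssociate.testout):  the key "abc" has
+       trailing letters "bc" that are not capitalized, so a new
+       upper-cased array "BC" is built ("creating key(2)"); the key
+       "JKL" already has capital trailing letters, so the original
+       array is reused with offset 1, yielding "KL" ("creating key(1)").
+     */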
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
diff --git a/db/test/scr016/TestAssociate.testout b/db/test/scr016/TestAssociate.testout
new file mode 100644
index 000000000..34414b660
--- /dev/null
+++ b/db/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/db/test/scr016/TestClosedDb.java b/db/test/scr016/TestClosedDb.java
new file mode 100644
index 000000000..67703f040
--- /dev/null
+++ b/db/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestClosedDb.java,v 1.2 2001/10/05 02:36:08 bostic Exp
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open("my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/db/test/scr016/TestClosedDb.testout b/db/test/scr016/TestClosedDb.testout
new file mode 100644
index 000000000..ce13883f6
--- /dev/null
+++ b/db/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/db/test/scr016/TestConstruct01.java b/db/test/scr016/TestConstruct01.java
new file mode 100644
index 000000000..e4d97f1d9
--- /dev/null
+++ b/db/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestConstruct01.java,v 1.4 2001/10/05 02:36:08 bostic Exp
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // Keep the low bit set; it means the db is removed between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ // Each DBT-flag variant runs 'iterations' passes of doall(), and each
+ // successful pass increments successcounter.
+ int expected_successes = iterations * dbt_flags.length;
+ if (options.successcounter == expected_successes) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (expected_successes - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
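+ // Rounds a (possibly negative) memory delta to an order-of-magnitude
+ // string, e.g. magnitude(42) returns "<100" and magnitude(-3000)
+ // returns "<10000" (editorial note, derived from the loop below).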
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+ int successcounter = 0;
+}
+
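+
+/* Editorial note (not part of the original test): with the argument parsing
+ * in main() above, a command line such as
+ *
+ *     java com.sleepycat.test.TestConstruct01 -25v 10
+ *
+ * sets mask = 1 | (1<<2) | (1<<5), i.e. remove the database between tests
+ * and run only t2 and t5, enables verbose output, and runs 10 iterations
+ * for each DBT allocation flag.
+ */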
diff --git a/db/test/scr016/TestConstruct01.testerr b/db/test/scr016/TestConstruct01.testerr
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/db/test/scr016/TestConstruct01.testerr
diff --git a/db/test/scr016/TestConstruct01.testout b/db/test/scr016/TestConstruct01.testout
new file mode 100644
index 000000000..5d2041cd1
--- /dev/null
+++ b/db/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/db/test/scr016/TestConstruct02.java b/db/test/scr016/TestConstruct02.java
new file mode 100644
index 000000000..d4442ce73
--- /dev/null
+++ b/db/test/scr016/TestConstruct02.java
@@ -0,0 +1,367 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestConstruct02.java,v 1.3 2001/10/05 02:36:09 bostic Exp
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopening a closed handle is no longer allowed, so we create a new Db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall(int mask)
+ {
+ itemcount = 0;
+ try {
+ for (int item=1; item<32; item++) {
+ if ((mask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":\n");
+ switch (item) {
+ case 1:
+ t1();
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((mask & 1) != 0, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x3;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // Keep the low bit set; it means the db is removed between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ System.out.println("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall(mask)) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/db/test/scr016/TestConstruct02.testout b/db/test/scr016/TestConstruct02.testout
new file mode 100644
index 000000000..5d2041cd1
--- /dev/null
+++ b/db/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/db/test/scr016/TestDbtFlags.java b/db/test/scr016/TestDbtFlags.java
new file mode 100644
index 000000000..f72290a5e
--- /dev/null
+++ b/db/test/scr016/TestDbtFlags.java
@@ -0,0 +1,237 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestDbtFlags.java,v 1.2 2001/10/05 02:36:09 bostic Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+ /* Zippy quotes used as test input */
+ static final String[] input_lines = {
+ "I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS",
+ "I pretend I'm living in a styrofoam packing crate, high in th'" +
+ "SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!!",
+ "FUN is never having to say you're SUSHI!!",
+ "Hold the MAYO & pass the COSMIC AWARENESS...",
+ "What GOOD is a CARDBOARD suitcase ANYWAY?",
+ "My nose feels like a bad Ronald Reagan movie...",
+ "The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!!",
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
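+
+ /* Editorial note: the numeric flag values printed in the headers of
+ * TestDbtFlags.testout/.testerr come straight from these constants --
+ * in this release Db.DB_DBT_MALLOC prints as 4, Db.DB_DBT_REALLOC as 16
+ * and Db.DB_DBT_USERMEM as 32, matching the order of the calls above.
+ */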
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + data.get_size() +
+ " ulen: " + data.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
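+
+ /* Editorial sketch, not called by this test: the comment in run() notes
+ * that a real application would normally enlarge the buffer when a
+ * DbMemoryException is thrown for a Db.DB_DBT_USERMEM Dbt. One plausible
+ * way (an assumption, mirroring the handler above, which simply reissues
+ * the same cursor get) is to grow each buffer to the size the library
+ * reported and retry with the same flags:
+ */
+ private static int refetchWithLargerBuffer(Dbc cursor, Dbt key, Dbt data,
+ int flags)
+ throws DbException
+ {
+ key.set_data(new byte[key.get_size()]);
+ key.set_ulen(key.get_size());
+ data.set_data(new byte[data.get_size()]);
+ data.set_ulen(data.get_size());
+ // The failed get left the cursor where it was (the handler above relies
+ // on this by simply issuing Db.DB_NEXT again), so retrying with the
+ // same flags re-reads the record that did not fit in the old buffers.
+ return cursor.get(key, data, flags);
+ }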
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
diff --git a/db/test/scr016/TestDbtFlags.testerr b/db/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 000000000..1f8cb2a55
--- /dev/null
+++ b/db/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,36 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 41 ulen: 20
+ data size: 41 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 44 ulen: 20
+ data size: 44 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 129 ulen: 20
+ data size: 129 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 63 ulen: 20
+ data size: 63 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 47 ulen: 20
+ data size: 47 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 41 ulen: 20
+ data size: 41 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 129 ulen: 50
+ data size: 129 ulen: 50
+exception, iteration 3: Dbt not large enough for available data
+ key size: 63 ulen: 50
+ data size: 63 ulen: 50
+exception, iteration 5: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/db/test/scr016/TestDbtFlags.testout b/db/test/scr016/TestDbtFlags.testout
new file mode 100644
index 000000000..800eebe86
--- /dev/null
+++ b/db/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,48 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
+=-=-=-= Test with DBT flags 16 bufsize -1
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
+=-=-=-= Test with DBT flags 32 bufsize 20
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
+=-=-=-= Test with DBT flags 32 bufsize 50
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
+=-=-=-= Test with DBT flags 32 bufsize 200
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
+=-=-=-= Test with DBT flags 0 bufsize -1
+FUN is never having to say you're SUSHI!! : !!IHSUS er'uoy yas ot gnivah reven si NUF
+Hold the MAYO & pass the COSMIC AWARENESS... : ...SSENERAWA CIMSOC eht ssap & OYAM eht dloH
+I pretend I'm living in a styrofoam packing crate, high in th'SWISS ALPS, still unable to accept th' idea of TOUCH-TONE DIALING!! : !!GNILAID ENOT-HCUOT fo aedi 'ht tpecca ot elbanu llits ,SPLA SSIWS'ht ni hgih ,etarc gnikcap maoforyts a ni gnivil m'I dneterp I
+I'm having a RELIGIOUS EXPERIENCE.. and I don't take any DRUGS : SGURD yna ekat t'nod I dna ..ECNEIREPXE SUOIGILER a gnivah m'I
+My nose feels like a bad Ronald Reagan movie... : ...eivom nagaeR dlanoR dab a ekil sleef eson yM
+The LOGARITHM of an ISOCELES TRIANGLE is TUESDAY WELD!! : !!DLEW YADSEUT si ELGNAIRT SELECOSI na fo MHTIRAGOL ehT
+What GOOD is a CARDBOARD suitcase ANYWAY? : ?YAWYNA esactius DRAOBDRAC a si DOOG tahW
diff --git a/db/test/scr016/TestGetSetMethods.java b/db/test/scr016/TestGetSetMethods.java
new file mode 100644
index 000000000..d940aa2f4
--- /dev/null
+++ b/db/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestGetSetMethods.java,v 1.2 2001/10/05 02:36:09 bostic Exp
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/db/test/scr016/TestKeyRange.java b/db/test/scr016/TestKeyRange.java
new file mode 100644
index 000000000..0981233b7
--- /dev/null
+++ b/db/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestKeyRange.java,v 1.2 2001/10/09 20:58:34 dda Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
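+
+ /* Editorial sketch, not used by the test: illustrates the EOF behavior
+ * documented above -- a final line with no trailing newline is returned
+ * once, and the next call returns null.
+ */
+ static void getLineExample()
+ {
+ Reader r = new StringReader("last line, no trailing newline");
+ System.out.println(getLine(r)); // prints the line
+ System.out.println(getLine(r)); // prints "null"
+ }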
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count= 0;/*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
+ DbKeyRange range = new DbKeyRange();
+ table.key_range(null, key, range, 0);
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/db/test/scr016/TestKeyRange.testout b/db/test/scr016/TestKeyRange.testout
new file mode 100644
index 000000000..c265f3289
--- /dev/null
+++ b/db/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/db/test/scr016/TestLockVec.java b/db/test/scr016/TestLockVec.java
new file mode 100644
index 000000000..6b72be7c5
--- /dev/null
+++ b/db/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestLockVec.java,v 1.2 2001/10/05 02:36:10 bostic Exp
+ */
+
+/*
+ * test of DbEnv.lock_vec()
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open("my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open("my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
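+
+ /* As used below (an editorial reading of the calls, not new behavior):
+ * dbenv.lock_vec(locker, flags, list, offset, count) processes 'count'
+ * entries of 'list' starting at index 'offset'; the "test index offset"
+ * step exercises a non-zero offset.
+ */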
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
+
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the mode, throw an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/db/test/scr016/TestLockVec.testout b/db/test/scr016/TestLockVec.testout
new file mode 100644
index 000000000..1cf16c6ac
--- /dev/null
+++ b/db/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/db/test/scr016/TestLogc.java b/db/test/scr016/TestLogc.java
new file mode 100644
index 000000000..a484ee763
--- /dev/null
+++ b/db/test/scr016/TestLogc.java
@@ -0,0 +1,90 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestLogc.java,v 1.4 2001/10/12 13:02:33 dda Exp
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open("first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open("second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+ System.out.println("logc.get: " + count);
+
+ // We ignore the contents of the log record,
+ // it's not portable.
+ //
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** Failed to get log record, returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/db/test/scr016/TestLogc.testout b/db/test/scr016/TestLogc.testout
new file mode 100644
index 000000000..858947464
--- /dev/null
+++ b/db/test/scr016/TestLogc.testout
@@ -0,0 +1,18 @@
+logc.get: 0
+logc.get: 1
+logc.get: 2
+logc.get: 3
+logc.get: 4
+logc.get: 5
+logc.get: 6
+logc.get: 7
+logc.get: 8
+logc.get: 9
+logc.get: 10
+logc.get: 11
+logc.get: 12
+logc.get: 13
+logc.get: 14
+logc.get: 15
+logc.get: 16
+TestLogc done.
diff --git a/db/test/scr016/TestOpenEmpty.java b/db/test/scr016/TestOpenEmpty.java
new file mode 100644
index 000000000..d16d99305
--- /dev/null
+++ b/db/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestOpenEmpty.java,v 1.2 2001/10/05 02:36:10 bostic Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
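+ // Deliberately create a zero-length file at FileName so that the open()
+ // below fails with "unexpected file type or format" -- the error this
+ // test expects (see TestOpenEmpty.testerr).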
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/db/test/scr016/TestOpenEmpty.testerr b/db/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 000000000..dd3e01c7a
--- /dev/null
+++ b/db/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/db/test/scr016/TestReplication.java b/db/test/scr016/TestReplication.java
new file mode 100644
index 000000000..2b3780770
--- /dev/null
+++ b/db/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestReplication.java,v 1.1 2001/10/12 13:02:33 dda Exp
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just a Vector of byte arrays, managed like a queue.
+ * Synchronization is via the Vector object itself; the client simply
+ * polls the queue with short sleeps rather than using wait/notify.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
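+
+/* Queue protocol, as implemented by send(), send_terminator() and the
+ * client loop below:
+ *   -1           terminate the client thread
+ *    1, byte[]   control Dbt data follows in the next queue element
+ *    2           empty control Dbt
+ *    3, byte[]   rec Dbt data follows in the next queue element
+ *    4           empty rec Dbt
+ * The client hands the message to rep_process_message() once the rec
+ * part (type 3 or 4) has arrived.
+ */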
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("master01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ if (msgtype == 3 || msgtype == 4) {
+ System.out.println("CLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open("x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Implements DbTransport
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("client01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open("x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ catch (Exception e) {
+ System.err.println("master exception: " + e);
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ // The test should only take a few milliseconds.
+ // Give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+            // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+ // Wait for the master to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+        if (!finished) {
+            System.err.println("TimelimitThread: time limit exceeded, exiting.");
+            System.exit(1);
+        }
+ }
+}
diff --git a/db/test/scr016/TestRpcServer.java b/db/test/scr016/TestRpcServer.java
new file mode 100644
index 000000000..46294ef8d
--- /dev/null
+++ b/db/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2001
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestRpcServer.java,v 1.2 2001/10/05 02:36:10 bostic Exp
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+    // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
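+
+// A minimal usage sketch for StringDbt (hypothetical; not exercised by this
+// test): the helper hides the byte conversions when storing plain strings.
+//
+//     StringDbt key = new StringDbt("fruit");
+//     StringDbt data = new StringDbt("apple");
+//     table.put(null, key, data, 0);
+//     table.get(null, key, data, 0);   // data.getString() returns "apple"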
diff --git a/db/test/scr016/TestSameDbt.java b/db/test/scr016/TestSameDbt.java
new file mode 100644
index 000000000..d4024b12b
--- /dev/null
+++ b/db/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestSameDbt.java,v 1.2 2001/10/05 02:36:10 bostic Exp
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open("my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
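+            // Passing the same Dbt object as both key and data is expected
+            // to fail with "Dbt is already in use" (see TestSameDbt.testout).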
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/db/test/scr016/TestSameDbt.testout b/db/test/scr016/TestSameDbt.testout
new file mode 100644
index 000000000..be4bbbe59
--- /dev/null
+++ b/db/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/db/test/scr016/TestSimpleAccess.java b/db/test/scr016/TestSimpleAccess.java
new file mode 100644
index 000000000..7b2cf61c4
--- /dev/null
+++ b/db/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,70 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestSimpleAccess.java,v 1.2 2001/10/05 02:36:10 bostic Exp
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open("my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
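+            // DB_DBT_MALLOC tells Db to allocate the returned data buffer
+            // on retrieval.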
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/db/test/scr016/TestSimpleAccess.testout b/db/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 000000000..dc88d4788
--- /dev/null
+++ b/db/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db/test/scr016/TestTruncate.java b/db/test/scr016/TestTruncate.java
new file mode 100644
index 000000000..3f74e8cd5
--- /dev/null
+++ b/db/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * Id: TestTruncate.java,v 1.2 2001/10/05 02:36:11 bostic Exp
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open("my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+                System.out.println("after truncate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/db/test/scr016/TestTruncate.testout b/db/test/scr016/TestTruncate.testout
new file mode 100644
index 000000000..23f291df7
--- /dev/null
+++ b/db/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/db/test/scr016/chk.javatests b/db/test/scr016/chk.javatests
new file mode 100644
index 000000000..df845f522
--- /dev/null
+++ b/db/test/scr016/chk.javatests
@@ -0,0 +1,78 @@
+#!/bin/sh -
+#
+# Id: chk.javatests,v 1.2 2001/10/12 13:02:34 dda Exp
+#
+# Check to make sure that regression tests for Java run.
+
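+# Typical invocation (a sketch; the build directory name is an assumption and
+# may differ on your system):
+#	cd build_unix && sh ../test/scr016/chk.javatests
+#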
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../include"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`head -1 ../README | sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/'`
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/db/test/scr016/ignore b/db/test/scr016/ignore
new file mode 100644
index 000000000..ced56a904
--- /dev/null
+++ b/db/test/scr016/ignore
@@ -0,0 +1,11 @@
+#
+# Id: ignore,v 1.3 2001/10/12 13:02:34 dda Exp
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
diff --git a/db/test/scr016/testall b/db/test/scr016/testall
new file mode 100644
index 000000000..5439ec22c
--- /dev/null
+++ b/db/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/db/test/scr016/testone b/db/test/scr016/testone
new file mode 100644
index 000000000..502027062
--- /dev/null
+++ b/db/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# Id: testone,v 1.4 2001/09/20 14:40:29 dda Exp
+#
+# Run just one Java regression test; the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/db/test/scr017/O.BH b/db/test/scr017/O.BH
new file mode 100644
index 000000000..cd499d387
--- /dev/null
+++ b/db/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/db/test/scr017/O.R b/db/test/scr017/O.R
new file mode 100644
index 000000000..d78a04727
--- /dev/null
+++ b/db/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/db/test/scr017/chk.db185 b/db/test/scr017/chk.db185
new file mode 100644
index 000000000..810ba59b1
--- /dev/null
+++ b/db/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# Id: chk.db185,v 1.2 2001/10/12 17:55:38 bostic Exp
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/db/test/scr017/t.c b/db/test/scr017/t.c
new file mode 100644
index 000000000..f03b33880
--- /dev/null
+++ b/db/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = data.data = buf;
+ key.size = data.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
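+	/*
+	 * Note: in the non-recno case key.size was computed from the string
+	 * the put loop left in buf; that string and the one formatted below
+	 * happen to have the same length, so the size remains correct.
+	 */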
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
diff --git a/db/test/shelltest.tcl b/db/test/shelltest.tcl
new file mode 100644
index 000000000..97c215acc
--- /dev/null
+++ b/db/test/shelltest.tcl
@@ -0,0 +1,83 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001
+# Sleepycat Software. All rights reserved.
+#
+# Id: shelltest.tcl,v 1.13 2001/10/12 15:54:53 bostic Exp
+#
+# TEST scr###
+# TEST The scr### directories are shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest { { run_one 0 }} {
+ source ./include.tcl
+ global shelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $test..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
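+
+# Example (a sketch): from the Tcl test environment, an individual shell
+# script suite can be run either through its wrapper proc or by index, e.g.
+#	% scr016
+#	% shelltest 16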
diff --git a/db/txn/txn_method.c b/db/txn/txn_method.c
new file mode 100644
index 000000000..8207acb05
--- /dev/null
+++ b/db/txn/txn_method.c
@@ -0,0 +1,125 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1996-2001
+ * Sleepycat Software. All rights reserved.
+ */
+
+#include "db_config.h"
+
+#ifndef lint
+static const char revid[] = "Id: txn_method.c,v 11.55 2001/10/08 16:04:37 bostic Exp ";
+#endif /* not lint */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <string.h>
+#endif
+
+#ifdef HAVE_RPC
+#include "db_server.h"
+#endif
+
+#include "db_int.h"
+#include "db_page.h"
+#include "log.h"
+#include "txn.h"
+
+#ifdef HAVE_RPC
+#include "rpc_client_ext.h"
+#endif
+
+static int __txn_set_tx_max __P((DB_ENV *, u_int32_t));
+static int __txn_set_tx_recover __P((DB_ENV *,
+ int (*)(DB_ENV *, DBT *, DB_LSN *, db_recops)));
+static int __txn_set_tx_timestamp __P((DB_ENV *, time_t *));
+
+/*
+ * __txn_dbenv_create --
+ * Transaction specific initialization of the DB_ENV structure.
+ *
+ * PUBLIC: void __txn_dbenv_create __P((DB_ENV *));
+ */
+void
+__txn_dbenv_create(dbenv)
+ DB_ENV *dbenv;
+{
+ /*
+ * !!!
+ * Our caller has not yet had the opportunity to reset the panic
+ * state or turn off mutex locking, and so we can neither check
+	 * the panic state nor acquire a mutex in the DB_ENV create path.
+ */
+
+ dbenv->tx_max = DEF_MAX_TXNS;
+
+#ifdef HAVE_RPC
+ if (F_ISSET(dbenv, DB_ENV_RPCCLIENT)) {
+ dbenv->set_tx_max = __dbcl_set_tx_max;
+ dbenv->set_tx_recover = __dbcl_set_tx_recover;
+ dbenv->set_tx_timestamp = __dbcl_set_tx_timestamp;
+ dbenv->txn_checkpoint = __dbcl_txn_checkpoint;
+ dbenv->txn_recover = __dbcl_txn_recover;
+ dbenv->txn_stat = __dbcl_txn_stat;
+ dbenv->txn_begin = __dbcl_txn_begin;
+ } else
+#endif
+ {
+ dbenv->set_tx_max = __txn_set_tx_max;
+ dbenv->set_tx_recover = __txn_set_tx_recover;
+ dbenv->set_tx_timestamp = __txn_set_tx_timestamp;
+ dbenv->txn_checkpoint = __txn_checkpoint;
+#ifdef CONFIG_TEST
+ dbenv->txn_id_set = __txn_id_set;
+#endif
+ dbenv->txn_recover = __txn_recover;
+ dbenv->txn_stat = __txn_stat;
+ dbenv->txn_begin = __txn_begin;
+ }
+}
+
+/*
+ * __txn_set_tx_max --
+ * Set the size of the transaction table.
+ */
+static int
+__txn_set_tx_max(dbenv, tx_max)
+ DB_ENV *dbenv;
+ u_int32_t tx_max;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_max");
+
+ dbenv->tx_max = tx_max;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_recover --
+ * Set the transaction abort recover function.
+ */
+static int
+__txn_set_tx_recover(dbenv, tx_recover)
+ DB_ENV *dbenv;
+ int (*tx_recover) __P((DB_ENV *, DBT *, DB_LSN *, db_recops));
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_recover");
+
+ dbenv->tx_recover = tx_recover;
+ return (0);
+}
+
+/*
+ * __txn_set_tx_timestamp --
+ * Set the transaction recovery timestamp.
+ */
+static int
+__txn_set_tx_timestamp(dbenv, timestamp)
+ DB_ENV *dbenv;
+ time_t *timestamp;
+{
+ ENV_ILLEGAL_AFTER_OPEN(dbenv, "set_tx_timestamp");
+
+ dbenv->tx_timestamp = *timestamp;
+ return (0);
+}
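+
+/*
+ * Illustrative application-level sketch (not part of this file; it simply
+ * shows how the method-table entries installed above are reached through
+ * the public DB_ENV handle):
+ *
+ *	DB_ENV *dbenv;
+ *	db_env_create(&dbenv, 0);
+ *	dbenv->set_tx_max(dbenv, 100);
+ *	dbenv->open(dbenv, "/path/to/env",
+ *	    DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN, 0);
+ */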